Package gluon :: Module dal
[hide private]
[frames] | no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
  136  __all__ = ['DAL', 'Field'] 
  137   
  138  DEFAULTLENGTH = {'string':512, 
  139                   'password':512, 
  140                   'upload':512, 
  141                   'text':2**15, 
  142                   'blob':2**31} 
  143  TIMINGSSIZE = 100 
  144  SPATIALLIBS = { 
  145      'Windows':'libspatialite', 
  146      'Linux':'libspatialite.so', 
  147      'Darwin':'libspatialite.dylib' 
  148      } 
  149  DEFAULT_URI = 'sqlite://dummy.db' 
  150   
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
  176  PYTHON_VERSION = sys.version_info[0] 
  177  if PYTHON_VERSION == 2: 
  178      import cPickle as pickle 
  179      import cStringIO as StringIO 
  180      import copy_reg as copyreg 
  181      hashlib_md5 = hashlib.md5 
  182      bytes, unicode = str, unicode 
  183  else: 
  184      import pickle 
  185      from io import StringIO as StringIO 
  186      import copyreg 
  187      long = int 
  188      hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) 
  189      bytes, unicode = bytes, str 
  190   
  191  CALLABLETYPES = (types.LambdaType, types.FunctionType, 
  192                   types.BuiltinFunctionType, 
  193                   types.MethodType, types.BuiltinMethodType) 
  194   
  195  TABLE_ARGS = set( 
  196      ('migrate','primarykey','fake_migrate','format','redefine', 
  197       'singular','plural','trigger_name','sequence_name', 
  198       'common_filter','polymodel','table_class','on_define','actual_name')) 
  199   
  200  SELECT_ARGS = set( 
  201      ('orderby', 'groupby', 'limitby','required', 'cache', 'left', 
  202       'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby')) 
  203   
  204  ogetattr = object.__getattribute__ 
  205  osetattr = object.__setattr__ 
  206  exists = os.path.exists 
  207  pjoin = os.path.join 
  208   
  209  ################################################################################### 
  210  # following checks allow the use of dal without web2py, as a standalone module 
  211  ################################################################################### 
  212  try: 
  213      from utils import web2py_uuid 
  214  except (ImportError, SystemError): 
  215      import uuid 
216 - def web2py_uuid(): return str(uuid.uuid4())
217 218 try: 219 import portalocker 220 have_portalocker = True 221 except ImportError: 222 have_portalocker = False 223 224 try: 225 import serializers 226 have_serializers = True 227 except ImportError: 228 have_serializers = False 229 try: 230 import json as simplejson 231 except ImportError: 232 try: 233 import gluon.contrib.simplejson as simplejson 234 except ImportError: 235 simplejson = None 236 237 try: 238 import validators 239 have_validators = True 240 except (ImportError, SyntaxError): 241 have_validators = False 242 243 LOGGER = logging.getLogger("web2py.dal") 244 DEFAULT = lambda:0 245 246 GLOBAL_LOCKER = threading.RLock() 247 THREAD_LOCAL = threading.local() 248 249 # internal representation of tables with field 250 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 251 252 REGEX_TYPE = re.compile('^([\w\_\:]+)') 253 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 254 REGEX_W = re.compile('^\w+$') 255 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 256 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') 257 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 258 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 259 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 260 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 261 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 262 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 263 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 264 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 265 REGEX_QUOTES = re.compile("'[^']*'") 266 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 267 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 268 REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' 269 270 # list of drivers will be built on the fly 271 # and lists only what is available 272 DRIVERS = [] 273 274 try: 275 from new import classobj 276 from google.appengine.ext import db as gae 277 from google.appengine.api import namespace_manager, rdbms 278 from google.appengine.api.datastore_types import Key ### for belongs on ID 279 from google.appengine.ext.db.polymodel import PolyModel 280 DRIVERS.append('google') 281 except ImportError: 282 pass 283 284 if not 'google' in DRIVERS: 285 286 try: 287 from pysqlite2 import dbapi2 as sqlite2 288 DRIVERS.append('SQLite(sqlite2)') 289 except ImportError: 290 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 291 292 try: 293 from sqlite3 import dbapi2 as sqlite3 294 DRIVERS.append('SQLite(sqlite3)') 295 except ImportError: 296 LOGGER.debug('no SQLite drivers sqlite3') 297 298 try: 299 # first try contrib driver, then from site-packages (if installed) 300 try: 301 import contrib.pymysql as pymysql 302 # monkeypatch pymysql because they havent fixed the bug: 303 # https://github.com/petehunt/PyMySQL/issues/86 304 pymysql.ESCAPE_REGEX = re.compile("'") 305 pymysql.ESCAPE_MAP = {"'": "''"} 306 # end monkeypatch 307 except ImportError: 308 import pymysql 309 DRIVERS.append('MySQL(pymysql)') 310 except ImportError: 311 LOGGER.debug('no MySQL driver pymysql') 312 313 try: 314 import MySQLdb 315 DRIVERS.append('MySQL(MySQLdb)') 316 except ImportError: 317 LOGGER.debug('no MySQL driver MySQLDB') 318 319 320 try: 321 import psycopg2 322 from psycopg2.extensions import adapt as psycopg2_adapt 323 DRIVERS.append('PostgreSQL(psycopg2)') 324 except ImportError: 325 LOGGER.debug('no PostgreSQL driver psycopg2') 326 327 try: 328 # first try contrib driver, then from site-packages (if installed) 329 try: 330 import contrib.pg8000.dbapi as pg8000 331 except ImportError: 332 import pg8000.dbapi as pg8000 333 DRIVERS.append('PostgreSQL(pg8000)') 334 except ImportError: 335 LOGGER.debug('no PostgreSQL driver pg8000') 336 337 try: 338 import cx_Oracle 339 DRIVERS.append('Oracle(cx_Oracle)') 340 except ImportError: 341 LOGGER.debug('no Oracle driver cx_Oracle') 342 343 try: 344 try: 345 import pyodbc 346 except ImportError: 347 try: 348 import contrib.pypyodbc as pyodbc 349 except Exception, e: 350 raise ImportError(str(e)) 351 DRIVERS.append('MSSQL(pyodbc)') 352 DRIVERS.append('DB2(pyodbc)') 353 DRIVERS.append('Teradata(pyodbc)') 354 DRIVERS.append('Ingres(pyodbc)') 355 except ImportError: 356 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 357 358 try: 359 import Sybase 360 DRIVERS.append('Sybase(Sybase)') 361 except ImportError: 362 LOGGER.debug('no Sybase driver') 363 364 try: 365 import kinterbasdb 366 DRIVERS.append('Interbase(kinterbasdb)') 367 DRIVERS.append('Firebird(kinterbasdb)') 368 except ImportError: 369 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 370 371 try: 372 import fdb 373 DRIVERS.append('Firebird(fdb)') 374 except ImportError: 375 LOGGER.debug('no Firebird driver fdb') 376 ##### 377 try: 378 import firebirdsql 379 DRIVERS.append('Firebird(firebirdsql)') 380 except ImportError: 381 LOGGER.debug('no Firebird driver firebirdsql') 382 383 try: 384 import informixdb 385 DRIVERS.append('Informix(informixdb)') 386 LOGGER.warning('Informix support is experimental') 387 except ImportError: 388 LOGGER.debug('no Informix driver informixdb') 389 390 try: 391 import sapdb 392 DRIVERS.append('SQL(sapdb)') 393 LOGGER.warning('SAPDB support is experimental') 394 except ImportError: 395 LOGGER.debug('no SAP driver sapdb') 396 397 try: 398 import cubriddb 399 DRIVERS.append('Cubrid(cubriddb)') 400 LOGGER.warning('Cubrid support is experimental') 401 except ImportError: 402 LOGGER.debug('no Cubrid driver cubriddb') 403 404 try: 405 from com.ziclix.python.sql import zxJDBC 406 import java.sql 407 # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 408 from org.sqlite import JDBC # required by java.sql; ensure we have it 409 zxJDBC_sqlite = java.sql.DriverManager 410 DRIVERS.append('PostgreSQL(zxJDBC)') 411 DRIVERS.append('SQLite(zxJDBC)') 412 LOGGER.warning('zxJDBC support is experimental') 413 is_jdbc = True 414 except ImportError: 415 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 416 is_jdbc = False 417 418 try: 419 import couchdb 420 DRIVERS.append('CouchDB(couchdb)') 421 except ImportError: 422 LOGGER.debug('no Couchdb driver couchdb') 423 424 try: 425 import pymongo 426 DRIVERS.append('MongoDB(pymongo)') 427 except: 428 LOGGER.debug('no MongoDB driver pymongo') 429 430 try: 431 import imaplib 432 DRIVERS.append('IMAP(imaplib)') 433 except: 434 LOGGER.debug('no IMAP driver imaplib') 435 436 PLURALIZE_RULES = [ 437 (re.compile('child$'), re.compile('child$'), 'children'), 438 (re.compile('oot$'), re.compile('oot$'), 'eet'), 439 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 440 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 441 (re.compile('sis$'), re.compile('sis$'), 'ses'), 442 (re.compile('man$'), re.compile('man$'), 'men'), 443 (re.compile('ife$'), re.compile('ife$'), 'ives'), 444 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 445 (re.compile('lf$'), re.compile('lf$'), 'lves'), 446 (re.compile('[sxz]$'), re.compile('$'), 'es'), 447 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 448 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 449 (re.compile('$'), re.compile('$'), 's'), 450 ]
451 452 -def pluralize(singular, rules=PLURALIZE_RULES):
453 for line in rules: 454 re_search, re_sub, replace = line 455 plural = re_search.search(singular) and re_sub.sub(replace, singular) 456 if plural: return plural
457
458 -def hide_password(uri):
459 if isinstance(uri,(list,tuple)): 460 return [hide_password(item) for item in uri] 461 return REGEX_NOPASSWD.sub('******',uri)
462
463 -def OR(a,b):
464 return a|b
465
466 -def AND(a,b):
467 return a&b
468
469 -def IDENTITY(x): return x
470
471 -def varquote_aux(name,quotestr='%s'):
472 return name if REGEX_W.match(name) else quotestr % name
473
474 -def quote_keyword(a,keyword='timestamp'):
475 regex = re.compile('\.keyword(?=\w)') 476 a = regex.sub('."%s"' % keyword,a) 477 return a
478 479 if 'google' in DRIVERS: 480 481 is_jdbc = False
482 483 - class GAEDecimalProperty(gae.Property):
484 """ 485 GAE decimal implementation 486 """ 487 data_type = decimal.Decimal 488
489 - def __init__(self, precision, scale, **kwargs):
490 super(GAEDecimalProperty, self).__init__(self, **kwargs) 491 d = '1.' 492 for x in range(scale): 493 d += '0' 494 self.round = decimal.Decimal(d)
495
496 - def get_value_for_datastore(self, model_instance):
497 value = super(GAEDecimalProperty, self)\ 498 .get_value_for_datastore(model_instance) 499 if value is None or value == '': 500 return None 501 else: 502 return str(value)
503
504 - def make_value_from_datastore(self, value):
505 if value is None or value == '': 506 return None 507 else: 508 return decimal.Decimal(value).quantize(self.round)
509
510 - def validate(self, value):
511 value = super(GAEDecimalProperty, self).validate(value) 512 if value is None or isinstance(value, decimal.Decimal): 513 return value 514 elif isinstance(value, basestring): 515 return decimal.Decimal(value) 516 raise gae.BadValueError("Property %s must be a Decimal or string."\ 517 % self.name)
518
519 ################################################################################### 520 # class that handles connection pooling (all adapters are derived from this one) 521 ################################################################################### 522 523 -class ConnectionPool(object):
524 525 POOLS = {} 526 check_active_connection = True 527 528 @staticmethod
529 - def set_folder(folder):
531 532 # ## this allows gluon to commit/rollback all dbs in this thread 533
534 - def close(self,action='commit',really=True):
535 if action: 536 if callable(action): 537 action(self) 538 else: 539 getattr(self, action)() 540 # ## if you want pools, recycle this connection 541 if self.pool_size: 542 GLOBAL_LOCKER.acquire() 543 pool = ConnectionPool.POOLS[self.uri] 544 if len(pool) < self.pool_size: 545 pool.append(self.connection) 546 really = False 547 GLOBAL_LOCKER.release() 548 if really: 549 self.close_connection() 550 self.connection = None
551 552 @staticmethod
553 - def close_all_instances(action):
554 """ to close cleanly databases in a multithreaded environment """ 555 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 556 for db_uid, db_group in dbs: 557 for db in db_group: 558 if hasattr(db,'_adapter'): 559 db._adapter.close(action) 560 getattr(THREAD_LOCAL,'db_instances',{}).clear() 561 getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear() 562 if callable(action): 563 action(None) 564 return
565
566 - def find_or_make_work_folder(self):
567 """ this actually does not make the folder. it has to be there """ 568 self.folder = getattr(THREAD_LOCAL,'folder','') 569 570 if (os.path.isabs(self.folder) and 571 isinstance(self, UseDatabaseStoredFile) and 572 self.folder.startswith(os.getcwd())): 573 self.folder = os.path.relpath(self.folder, os.getcwd()) 574 575 # Creating the folder if it does not exist 576 if False and self.folder and not exists(self.folder): 577 os.mkdir(self.folder)
578
579 - def after_connection_hook(self):
580 """hook for the after_connection parameter""" 581 if callable(self._after_connection): 582 self._after_connection(self) 583 self.after_connection()
584
585 - def after_connection(self):
586 """ this it is supposed to be overloaded by adapters""" 587 pass
588
589 - def reconnect(self, f=None, cursor=True):
590 """ 591 this function defines: self.connection and self.cursor 592 (iff cursor is True) 593 if self.pool_size>0 it will try pull the connection from the pool 594 if the connection is not active (closed by db server) it will loop 595 if not self.pool_size or no active connections in pool makes a new one 596 """ 597 if getattr(self,'connection', None) != None: 598 return 599 if f is None: 600 f = self.connector 601 602 # if not hasattr(self, "driver") or self.driver is None: 603 # LOGGER.debug("Skipping connection since there's no driver") 604 # return 605 606 if not self.pool_size: 607 self.connection = f() 608 self.cursor = cursor and self.connection.cursor() 609 else: 610 uri = self.uri 611 POOLS = ConnectionPool.POOLS 612 while True: 613 GLOBAL_LOCKER.acquire() 614 if not uri in POOLS: 615 POOLS[uri] = [] 616 if POOLS[uri]: 617 self.connection = POOLS[uri].pop() 618 GLOBAL_LOCKER.release() 619 self.cursor = cursor and self.connection.cursor() 620 try: 621 if self.cursor and self.check_active_connection: 622 self.execute('SELECT 1;') 623 break 624 except: 625 pass 626 else: 627 GLOBAL_LOCKER.release() 628 self.connection = f() 629 self.cursor = cursor and self.connection.cursor() 630 break 631 self.after_connection_hook()
632
633 634 ################################################################################### 635 # this is a generic adapter that does nothing; all others are derived from this one 636 ################################################################################### 637 638 -class BaseAdapter(ConnectionPool):
639 native_json = False 640 driver = None 641 driver_name = None 642 drivers = () # list of drivers from which to pick 643 connection = None 644 commit_on_alter_table = False 645 support_distributed_transaction = False 646 uploads_in_blob = False 647 can_select_for_update = True 648 dbpath = None 649 folder = None 650 651 TRUE = 'T' 652 FALSE = 'F' 653 T_SEP = ' ' 654 QUOTE_TEMPLATE = '"%s"' 655 656 types = { 657 'boolean': 'CHAR(1)', 658 'string': 'CHAR(%(length)s)', 659 'text': 'TEXT', 660 'json': 'TEXT', 661 'password': 'CHAR(%(length)s)', 662 'blob': 'BLOB', 663 'upload': 'CHAR(%(length)s)', 664 'integer': 'INTEGER', 665 'bigint': 'INTEGER', 666 'float':'DOUBLE', 667 'double': 'DOUBLE', 668 'decimal': 'DOUBLE', 669 'date': 'DATE', 670 'time': 'TIME', 671 'datetime': 'TIMESTAMP', 672 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 673 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 674 'list:integer': 'TEXT', 675 'list:string': 'TEXT', 676 'list:reference': 'TEXT', 677 # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' 678 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', 679 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 680 } 681
682 - def isOperationalError(self,exception):
683 if not hasattr(self.driver, "OperationalError"): 684 return None 685 return isinstance(exception, self.driver.OperationalError)
686
687 - def id_query(self, table):
688 return table._id != None
689
690 - def adapt(self, obj):
691 return "'%s'" % obj.replace("'", "''")
692
693 - def smart_adapt(self, obj):
694 if isinstance(obj,(int,float)): 695 return str(obj) 696 return self.adapt(str(obj))
697
698 - def file_exists(self, filename):
699 """ 700 to be used ONLY for files that on GAE may not be on filesystem 701 """ 702 return exists(filename)
703
704 - def file_open(self, filename, mode='rb', lock=True):
705 """ 706 to be used ONLY for files that on GAE may not be on filesystem 707 """ 708 if have_portalocker and lock: 709 fileobj = portalocker.LockedFile(filename,mode) 710 else: 711 fileobj = open(filename,mode) 712 return fileobj
713
714 - def file_close(self, fileobj):
715 """ 716 to be used ONLY for files that on GAE may not be on filesystem 717 """ 718 if fileobj: 719 fileobj.close()
720
721 - def file_delete(self, filename):
722 os.unlink(filename)
723
724 - def find_driver(self,adapter_args,uri=None):
725 self.adapter_args = adapter_args 726 if getattr(self,'driver',None) != None: 727 return 728 drivers_available = [driver for driver in self.drivers 729 if driver in globals()] 730 if uri: 731 items = uri.split('://',1)[0].split(':') 732 request_driver = items[1] if len(items)>1 else None 733 else: 734 request_driver = None 735 request_driver = request_driver or adapter_args.get('driver') 736 if request_driver: 737 if request_driver in drivers_available: 738 self.driver_name = request_driver 739 self.driver = globals().get(request_driver) 740 else: 741 raise RuntimeError("driver %s not available" % request_driver) 742 elif drivers_available: 743 self.driver_name = drivers_available[0] 744 self.driver = globals().get(self.driver_name) 745 else: 746 raise RuntimeError("no driver available %s" % str(self.drivers))
747
748 - def log(self, message, table=None):
749 """ Logs migrations 750 751 It will not log changes if logfile is not specified. Defaults 752 to sql.log 753 """ 754 755 isabs = None 756 logfilename = self.adapter_args.get('logfile','sql.log') 757 writelog = bool(logfilename) 758 if writelog: 759 isabs = os.path.isabs(logfilename) 760 761 if table and table._dbt and writelog and self.folder: 762 if isabs: 763 table._loggername = logfilename 764 else: 765 table._loggername = pjoin(self.folder, logfilename) 766 logfile = self.file_open(table._loggername, 'a') 767 logfile.write(message) 768 self.file_close(logfile)
769 770
771 - def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8', 772 credential_decoder=IDENTITY, driver_args={}, 773 adapter_args={},do_connect=True, after_connection=None):
774 self.db = db 775 self.dbengine = "None" 776 self.uri = uri 777 self.pool_size = pool_size 778 self.folder = folder 779 self.db_codec = db_codec 780 self._after_connection = after_connection 781 class Dummy(object): 782 lastrowid = 1 783 def __getattr__(self, value): 784 return lambda *a, **b: []
785 self.connection = Dummy() 786 self.cursor = Dummy() 787
788 - def sequence_name(self,tablename):
789 return '%s_sequence' % tablename
790
791 - def trigger_name(self,tablename):
792 return '%s_sequence' % tablename
793
794 - def varquote(self,name):
795 return name
796
797 - def create_table(self, table, 798 migrate=True, 799 fake_migrate=False, 800 polymodel=None):
801 db = table._db 802 fields = [] 803 # PostGIS geo fields are added after the table has been created 804 postcreation_fields = [] 805 sql_fields = {} 806 sql_fields_aux = {} 807 TFK = {} 808 tablename = table._tablename 809 sortable = 0 810 types = self.types 811 for field in table: 812 sortable += 1 813 field_name = field.name 814 field_type = field.type 815 if isinstance(field_type,SQLCustomType): 816 ftype = field_type.native or field_type.type 817 elif field_type.startswith('reference'): 818 referenced = field_type[10:].strip() 819 if referenced == '.': 820 referenced = tablename 821 constraint_name = self.constraint_name(tablename, field_name) 822 if not '.' in referenced \ 823 and referenced != tablename \ 824 and hasattr(table,'_primarykey'): 825 ftype = types['integer'] 826 else: 827 if hasattr(table,'_primarykey'): 828 rtablename,rfieldname = referenced.split('.') 829 rtable = db[rtablename] 830 rfield = rtable[rfieldname] 831 # must be PK reference or unique 832 if rfieldname in rtable._primarykey or \ 833 rfield.unique: 834 ftype = types[rfield.type[:9]] % \ 835 dict(length=rfield.length) 836 # multicolumn primary key reference? 837 if not rfield.unique and len(rtable._primarykey)>1: 838 # then it has to be a table level FK 839 if rtablename not in TFK: 840 TFK[rtablename] = {} 841 TFK[rtablename][rfieldname] = field_name 842 else: 843 ftype = ftype + \ 844 types['reference FK'] % dict( 845 constraint_name = constraint_name, # should be quoted 846 foreign_key = '%s (%s)' % (rtablename, 847 rfieldname), 848 table_name = tablename, 849 field_name = field_name, 850 on_delete_action=field.ondelete) 851 else: 852 # make a guess here for circular references 853 if referenced in db: 854 id_fieldname = db[referenced]._id.name 855 elif referenced == tablename: 856 id_fieldname = table._id.name 857 else: #make a guess 858 id_fieldname = 'id' 859 ftype = types[field_type[:9]] % dict( 860 index_name = field_name+'__idx', 861 field_name = field_name, 862 constraint_name = constraint_name, 863 foreign_key = '%s (%s)' % (referenced, 864 id_fieldname), 865 on_delete_action=field.ondelete) 866 elif field_type.startswith('list:reference'): 867 ftype = types[field_type[:14]] 868 elif field_type.startswith('decimal'): 869 precision, scale = map(int,field_type[8:-1].split(',')) 870 ftype = types[field_type[:7]] % \ 871 dict(precision=precision,scale=scale) 872 elif field_type.startswith('geo'): 873 if not hasattr(self,'srid'): 874 raise RuntimeError('Adapter does not support geometry') 875 srid = self.srid 876 geotype, parms = field_type[:-1].split('(') 877 if not geotype in types: 878 raise SyntaxError( 879 'Field: unknown field type: %s for %s' \ 880 % (field_type, field_name)) 881 ftype = types[geotype] 882 if self.dbengine == 'postgres' and geotype == 'geometry': 883 # parameters: schema, srid, dimension 884 dimension = 2 # GIS.dimension ??? 885 parms = parms.split(',') 886 if len(parms) == 3: 887 schema, srid, dimension = parms 888 elif len(parms) == 2: 889 schema, srid = parms 890 else: 891 schema = parms[0] 892 ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype] 893 ftype = ftype % dict(schema=schema, 894 tablename=tablename, 895 fieldname=field_name, srid=srid, 896 dimension=dimension) 897 postcreation_fields.append(ftype) 898 elif not field_type in types: 899 raise SyntaxError('Field: unknown field type: %s for %s' % \ 900 (field_type, field_name)) 901 else: 902 ftype = types[field_type]\ 903 % dict(length=field.length) 904 if not field_type.startswith('id') and \ 905 not field_type.startswith('reference'): 906 if field.notnull: 907 ftype += ' NOT NULL' 908 else: 909 ftype += self.ALLOW_NULL() 910 if field.unique: 911 ftype += ' UNIQUE' 912 if field.custom_qualifier: 913 ftype += ' %s' % field.custom_qualifier 914 915 # add to list of fields 916 sql_fields[field_name] = dict( 917 length=field.length, 918 unique=field.unique, 919 notnull=field.notnull, 920 sortable=sortable, 921 type=str(field_type), 922 sql=ftype) 923 924 if field.notnull and not field.default is None: 925 # Caveat: sql_fields and sql_fields_aux 926 # differ for default values. 927 # sql_fields is used to trigger migrations and sql_fields_aux 928 # is used for create tables. 929 # The reason is that we do not want to trigger 930 # a migration simply because a default value changes. 931 not_null = self.NOT_NULL(field.default, field_type) 932 ftype = ftype.replace('NOT NULL', not_null) 933 sql_fields_aux[field_name] = dict(sql=ftype) 934 # Postgres - PostGIS: 935 # geometry fields are added after the table has been created, not now 936 if not (self.dbengine == 'postgres' and \ 937 field_type.startswith('geom')): 938 fields.append('%s %s' % (field_name, ftype)) 939 other = ';' 940 941 # backend-specific extensions to fields 942 if self.dbengine == 'mysql': 943 if not hasattr(table, "_primarykey"): 944 fields.append('PRIMARY KEY(%s)' % table._id.name) 945 other = ' ENGINE=InnoDB CHARACTER SET utf8;' 946 947 fields = ',\n '.join(fields) 948 for rtablename in TFK: 949 rfields = TFK[rtablename] 950 pkeys = db[rtablename]._primarykey 951 fkeys = [ rfields[k] for k in pkeys ] 952 fields = fields + ',\n ' + \ 953 types['reference TFK'] % dict( 954 table_name = tablename, 955 field_name=', '.join(fkeys), 956 foreign_table = rtablename, 957 foreign_key = ', '.join(pkeys), 958 on_delete_action = field.ondelete) 959 960 if getattr(table,'_primarykey',None): 961 query = "CREATE TABLE %s(\n %s,\n %s) %s" % \ 962 (tablename, fields, 963 self.PRIMARY_KEY(', '.join(table._primarykey)),other) 964 else: 965 query = "CREATE TABLE %s(\n %s\n)%s" % \ 966 (tablename, fields, other) 967 968 if self.uri.startswith('sqlite:///') \ 969 or self.uri.startswith('spatialite:///'): 970 path_encoding = sys.getfilesystemencoding() \ 971 or locale.getdefaultlocale()[1] or 'utf8' 972 dbpath = self.uri[9:self.uri.rfind('/')]\ 973 .decode('utf8').encode(path_encoding) 974 else: 975 dbpath = self.folder 976 977 if not migrate: 978 return query 979 elif self.uri.startswith('sqlite:memory')\ 980 or self.uri.startswith('spatialite:memory'): 981 table._dbt = None 982 elif isinstance(migrate, str): 983 table._dbt = pjoin(dbpath, migrate) 984 else: 985 table._dbt = pjoin( 986 dbpath, '%s_%s.table' % (table._db._uri_hash, tablename)) 987 988 if not table._dbt or not self.file_exists(table._dbt): 989 if table._dbt: 990 self.log('timestamp: %s\n%s\n' 991 % (datetime.datetime.today().isoformat(), 992 query), table) 993 if not fake_migrate: 994 self.create_sequence_and_triggers(query,table) 995 table._db.commit() 996 # Postgres geom fields are added now, 997 # after the table has been created 998 for query in postcreation_fields: 999 self.execute(query) 1000 table._db.commit() 1001 if table._dbt: 1002 tfile = self.file_open(table._dbt, 'w') 1003 pickle.dump(sql_fields, tfile) 1004 self.file_close(tfile) 1005 if fake_migrate: 1006 self.log('faked!\n', table) 1007 else: 1008 self.log('success!\n', table) 1009 else: 1010 tfile = self.file_open(table._dbt, 'r') 1011 try: 1012 sql_fields_old = pickle.load(tfile) 1013 except EOFError: 1014 self.file_close(tfile) 1015 raise RuntimeError('File %s appears corrupted' % table._dbt) 1016 self.file_close(tfile) 1017 if sql_fields != sql_fields_old: 1018 self.migrate_table(table, 1019 sql_fields, sql_fields_old, 1020 sql_fields_aux, None, 1021 fake_migrate=fake_migrate) 1022 return query
1023
1024 - def migrate_table( 1025 self, 1026 table, 1027 sql_fields, 1028 sql_fields_old, 1029 sql_fields_aux, 1030 logfile, 1031 fake_migrate=False, 1032 ):
1033 1034 # logfile is deprecated (moved to adapter.log method) 1035 db = table._db 1036 db._migrated.append(table._tablename) 1037 tablename = table._tablename 1038 def fix(item): 1039 k,v=item 1040 if not isinstance(v,dict): 1041 v=dict(type='unknown',sql=v) 1042 return k.lower(),v
1043 # make sure all field names are lower case to avoid 1044 # migrations because of case cahnge 1045 sql_fields = dict(map(fix,sql_fields.iteritems())) 1046 sql_fields_old = dict(map(fix,sql_fields_old.iteritems())) 1047 sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems())) 1048 if db._debug: 1049 logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields)) 1050 1051 keys = sql_fields.keys() 1052 for key in sql_fields_old: 1053 if not key in keys: 1054 keys.append(key) 1055 new_add = self.concat_add(tablename) 1056 1057 metadata_change = False 1058 sql_fields_current = copy.copy(sql_fields_old) 1059 for key in keys: 1060 query = None 1061 if not key in sql_fields_old: 1062 sql_fields_current[key] = sql_fields[key] 1063 if self.dbengine in ('postgres',) and \ 1064 sql_fields[key]['type'].startswith('geometry'): 1065 # 'sql' == ftype in sql 1066 query = [ sql_fields[key]['sql'] ] 1067 else: 1068 query = ['ALTER TABLE %s ADD %s %s;' % \ 1069 (tablename, key, 1070 sql_fields_aux[key]['sql'].replace(', ', new_add))] 1071 metadata_change = True 1072 elif self.dbengine in ('sqlite', 'spatialite'): 1073 if key in sql_fields: 1074 sql_fields_current[key] = sql_fields[key] 1075 metadata_change = True 1076 elif not key in sql_fields: 1077 del sql_fields_current[key] 1078 ftype = sql_fields_old[key]['type'] 1079 if self.dbengine in ('postgres',) and ftype.startswith('geometry'): 1080 geotype, parms = ftype[:-1].split('(') 1081 schema = parms.split(',')[0] 1082 query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" % 1083 dict(schema=schema, table=tablename, field=key,) ] 1084 elif self.dbengine in ('firebird',): 1085 query = ['ALTER TABLE %s DROP %s;' % (tablename, key)] 1086 else: 1087 query = ['ALTER TABLE %s DROP COLUMN %s;' 1088 % (tablename, key)] 1089 metadata_change = True 1090 elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \ 1091 and not (key in table.fields and 1092 isinstance(table[key].type, SQLCustomType)) \ 1093 and not sql_fields[key]['type'].startswith('reference')\ 1094 and not sql_fields[key]['type'].startswith('double')\ 1095 and not sql_fields[key]['type'].startswith('id'): 1096 sql_fields_current[key] = sql_fields[key] 1097 t = tablename 1098 tt = sql_fields_aux[key]['sql'].replace(', ', new_add) 1099 if self.dbengine in ('firebird',): 1100 drop_expr = 'ALTER TABLE %s DROP %s;' 1101 else: 1102 drop_expr = 'ALTER TABLE %s DROP COLUMN %s;' 1103 key_tmp = key + '__tmp' 1104 query = ['ALTER TABLE %s ADD %s %s;' % (t, key_tmp, tt), 1105 'UPDATE %s SET %s=%s;' % (t, key_tmp, key), 1106 drop_expr % (t, key), 1107 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), 1108 'UPDATE %s SET %s=%s;' % (t, key, key_tmp), 1109 drop_expr % (t, key_tmp)] 1110 metadata_change = True 1111 elif sql_fields[key]['type'] != sql_fields_old[key]['type']: 1112 sql_fields_current[key] = sql_fields[key] 1113 metadata_change = True 1114 1115 if query: 1116 self.log('timestamp: %s\n' 1117 % datetime.datetime.today().isoformat(), table) 1118 db['_lastsql'] = '\n'.join(query) 1119 for sub_query in query: 1120 self.log(sub_query + '\n', table) 1121 if fake_migrate: 1122 if db._adapter.commit_on_alter_table: 1123 self.save_dbt(table,sql_fields_current) 1124 self.log('faked!\n', table) 1125 else: 1126 self.execute(sub_query) 1127 # Caveat: mysql, oracle and firebird do not allow multiple alter table 1128 # in one transaction so we must commit partial transactions and 1129 # update table._dbt after alter table. 1130 if db._adapter.commit_on_alter_table: 1131 db.commit() 1132 self.save_dbt(table,sql_fields_current) 1133 self.log('success!\n', table) 1134 1135 elif metadata_change: 1136 self.save_dbt(table,sql_fields_current) 1137 1138 if metadata_change and not (query and db._adapter.commit_on_alter_table): 1139 db.commit() 1140 self.save_dbt(table,sql_fields_current) 1141 self.log('success!\n', table) 1142
1143 - def save_dbt(self,table, sql_fields_current):
1144 tfile = self.file_open(table._dbt, 'w') 1145 pickle.dump(sql_fields_current, tfile) 1146 self.file_close(tfile)
1147
1148 - def LOWER(self, first):
1149 return 'LOWER(%s)' % self.expand(first)
1150
1151 - def UPPER(self, first):
1152 return 'UPPER(%s)' % self.expand(first)
1153
1154 - def COUNT(self, first, distinct=None):
1155 return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \ 1156 % self.expand(first)
1157
1158 - def EXTRACT(self, first, what):
1159 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
1160
1161 - def EPOCH(self, first):
1162 return self.EXTRACT(first, 'epoch')
1163
1164 - def LENGTH(self, first):
1165 return "LENGTH(%s)" % self.expand(first)
1166
1167 - def AGGREGATE(self, first, what):
1168 return "%s(%s)" % (what, self.expand(first))
1169
1170 - def JOIN(self):
1171 return 'JOIN'
1172
1173 - def LEFT_JOIN(self):
1174 return 'LEFT JOIN'
1175
1176 - def RANDOM(self):
1177 return 'Random()'
1178
1179 - def NOT_NULL(self, default, field_type):
1180 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
1181
1182 - def COALESCE(self, first, second):
1183 expressions = [self.expand(first)]+[self.expand(e) for e in second] 1184 return 'COALESCE(%s)' % ','.join(expressions)
1185
1186 - def COALESCE_ZERO(self, first):
1187 return 'COALESCE(%s,0)' % self.expand(first)
1188
1189 - def RAW(self, first):
1190 return first
1191
1192 - def ALLOW_NULL(self):
1193 return ''
1194
1195 - def SUBSTRING(self, field, parameters):
1196 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1197
1198 - def PRIMARY_KEY(self, key):
1199 return 'PRIMARY KEY(%s)' % key
1200
1201 - def _drop(self, table, mode):
1202 return ['DROP TABLE %s;' % table]
1203
1204 - def drop(self, table, mode=''):
1205 db = table._db 1206 queries = self._drop(table, mode) 1207 for query in queries: 1208 if table._dbt: 1209 self.log(query + '\n', table) 1210 self.execute(query) 1211 db.commit() 1212 del db[table._tablename] 1213 del db.tables[db.tables.index(table._tablename)] 1214 db._remove_references_to(table) 1215 if table._dbt: 1216 self.file_delete(table._dbt) 1217 self.log('success!\n', table)
1218
1219 - def _insert(self, table, fields):
1220 if fields: 1221 keys = ','.join(f.name for f, v in fields) 1222 values = ','.join(self.expand(v, f.type) for f, v in fields) 1223 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1224 else: 1225 return self._insert_empty(table)
1226
1227 - def _insert_empty(self, table):
1228 return 'INSERT INTO %s DEFAULT VALUES;' % table
1229
1230 - def insert(self, table, fields):
1231 query = self._insert(table,fields) 1232 try: 1233 self.execute(query) 1234 except Exception: 1235 e = sys.exc_info()[1] 1236 if hasattr(table,'_on_insert_error'): 1237 return table._on_insert_error(table,fields,e) 1238 raise e 1239 if hasattr(table,'_primarykey'): 1240 return dict([(k[0].name, k[1]) for k in fields \ 1241 if k[0].name in table._primarykey]) 1242 id = self.lastrowid(table) 1243 if not isinstance(id,int): 1244 return id 1245 rid = Reference(id) 1246 (rid._table, rid._record) = (table, None) 1247 return rid
1248
1249 - def bulk_insert(self, table, items):
1250 return [self.insert(table,item) for item in items]
1251
1252 - def NOT(self, first):
1253 return '(NOT %s)' % self.expand(first)
1254
1255 - def AND(self, first, second):
1256 return '(%s AND %s)' % (self.expand(first), self.expand(second))
1257
1258 - def OR(self, first, second):
1259 return '(%s OR %s)' % (self.expand(first), self.expand(second))
1260
1261 - def BELONGS(self, first, second):
1262 if isinstance(second, str): 1263 return '(%s IN (%s))' % (self.expand(first), second[:-1]) 1264 elif not second: 1265 return '(1=0)' 1266 items = ','.join(self.expand(item, first.type) for item in second) 1267 return '(%s IN (%s))' % (self.expand(first), items)
1268
1269 - def REGEXP(self, first, second):
1270 "regular expression operator" 1271 raise NotImplementedError
1272
1273 - def LIKE(self, first, second):
1274 "case sensitive like operator" 1275 raise NotImplementedError
1276
1277 - def ILIKE(self, first, second):
1278 "case in-sensitive like operator" 1279 return '(%s LIKE %s)' % (self.expand(first), 1280 self.expand(second, 'string'))
1281
1282 - def STARTSWITH(self, first, second):
1283 return '(%s LIKE %s)' % (self.expand(first), 1284 self.expand(second+'%', 'string'))
1285
1286 - def ENDSWITH(self, first, second):
1287 return '(%s LIKE %s)' % (self.expand(first), 1288 self.expand('%'+second, 'string'))
1289
1290 - def CONTAINS(self,first,second,case_sensitive=False):
1291 if first.type in ('string','text', 'json'): 1292 if isinstance(second,Expression): 1293 second = Expression(None,self.CONCAT('%',Expression( 1294 None,self.REPLACE(second,('%','%%'))),'%')) 1295 else: 1296 second = '%'+str(second).replace('%','%%')+'%' 1297 elif first.type.startswith('list:'): 1298 if isinstance(second,Expression): 1299 second = Expression(None,self.CONCAT( 1300 '%|',Expression(None,self.REPLACE( 1301 Expression(None,self.REPLACE( 1302 second,('%','%%'))),('|','||'))),'|%')) 1303 else: 1304 second = '%|'+str(second).replace('%','%%')\ 1305 .replace('|','||')+'|%' 1306 op = case_sensitive and self.LIKE or self.ILIKE 1307 return op(first,second)
1308
1309 - def EQ(self, first, second=None):
1310 if second is None: 1311 return '(%s IS NULL)' % self.expand(first) 1312 return '(%s = %s)' % (self.expand(first), 1313 self.expand(second, first.type))
1314
1315 - def NE(self, first, second=None):
1316 if second is None: 1317 return '(%s IS NOT NULL)' % self.expand(first) 1318 return '(%s <> %s)' % (self.expand(first), 1319 self.expand(second, first.type))
1320
1321 - def LT(self,first,second=None):
1322 if second is None: 1323 raise RuntimeError("Cannot compare %s < None" % first) 1324 return '(%s < %s)' % (self.expand(first), 1325 self.expand(second,first.type))
1326
1327 - def LE(self,first,second=None):
1328 if second is None: 1329 raise RuntimeError("Cannot compare %s <= None" % first) 1330 return '(%s <= %s)' % (self.expand(first), 1331 self.expand(second,first.type))
1332
1333 - def GT(self,first,second=None):
1334 if second is None: 1335 raise RuntimeError("Cannot compare %s > None" % first) 1336 return '(%s > %s)' % (self.expand(first), 1337 self.expand(second,first.type))
1338
1339 - def GE(self,first,second=None):
1340 if second is None: 1341 raise RuntimeError("Cannot compare %s >= None" % first) 1342 return '(%s >= %s)' % (self.expand(first), 1343 self.expand(second,first.type))
1344
1345 - def is_numerical_type(self, ftype):
1346 return ftype in ('integer','boolean','double','bigint') or \ 1347 ftype.startswith('decimal')
1348
1349 - def REPLACE(self, first, (second, third)):
1350 return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'), 1351 self.expand(second,'string'), 1352 self.expand(third,'string'))
1353
1354 - def CONCAT(self, *items):
1355 return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
1356
1357 - def ADD(self, first, second):
1358 if self.is_numerical_type(first.type): 1359 return '(%s + %s)' % (self.expand(first), 1360 self.expand(second, first.type)) 1361 else: 1362 return self.CONCAT(first, second)
1363
1364 - def SUB(self, first, second):
1365 return '(%s - %s)' % (self.expand(first), 1366 self.expand(second, first.type))
1367
1368 - def MUL(self, first, second):
1369 return '(%s * %s)' % (self.expand(first), 1370 self.expand(second, first.type))
1371
1372 - def DIV(self, first, second):
1373 return '(%s / %s)' % (self.expand(first), 1374 self.expand(second, first.type))
1375
1376 - def MOD(self, first, second):
1377 return '(%s %% %s)' % (self.expand(first), 1378 self.expand(second, first.type))
1379
1380 - def AS(self, first, second):
1381 return '%s AS %s' % (self.expand(first), second)
1382
1383 - def ON(self, first, second):
1384 if use_common_filters(second): 1385 second = self.common_filter(second,[first._tablename]) 1386 return '%s ON %s' % (self.expand(first), self.expand(second))
1387
1388 - def INVERT(self, first):
1389 return '%s DESC' % self.expand(first)
1390
1391 - def COMMA(self, first, second):
1392 return '%s, %s' % (self.expand(first), self.expand(second))
1393
1394 - def expand(self, expression, field_type=None):
1395 if isinstance(expression, Field): 1396 out = '%s.%s' % (expression.table._tablename, expression.name) 1397 if field_type == 'string' and not expression.type in ( 1398 'string','text','json','password'): 1399 out = 'CAST(%s AS %s)' % (out, self.types['text']) 1400 return out 1401 elif isinstance(expression, (Expression, Query)): 1402 first = expression.first 1403 second = expression.second 1404 op = expression.op 1405 optional_args = expression.optional_args or {} 1406 if not second is None: 1407 out = op(first, second, **optional_args) 1408 elif not first is None: 1409 out = op(first,**optional_args) 1410 elif isinstance(op, str): 1411 if op.endswith(';'): 1412 op=op[:-1] 1413 out = '(%s)' % op 1414 else: 1415 out = op() 1416 return out 1417 elif field_type: 1418 return str(self.represent(expression,field_type)) 1419 elif isinstance(expression,(list,tuple)): 1420 return ','.join(self.represent(item,field_type) \ 1421 for item in expression) 1422 elif isinstance(expression, bool): 1423 return '1' if expression else '0' 1424 else: 1425 return str(expression)
1426
1427 - def table_alias(self,name):
1428 return str(name if isinstance(name,Table) else self.db[name])
1429
1430 - def alias(self, table, alias):
1431 """ 1432 Given a table object, makes a new table object 1433 with alias name. 1434 """ 1435 other = copy.copy(table) 1436 other['_ot'] = other._ot or other._tablename 1437 other['ALL'] = SQLALL(other) 1438 other['_tablename'] = alias 1439 for fieldname in other.fields: 1440 other[fieldname] = copy.copy(other[fieldname]) 1441 other[fieldname]._tablename = alias 1442 other[fieldname].tablename = alias 1443 other[fieldname].table = other 1444 table._db[alias] = other 1445 return other
1446
1447 - def _truncate(self, table, mode=''):
1448 tablename = table._tablename 1449 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1450
1451 - def truncate(self, table, mode= ' '):
1452 # Prepare functions "write_to_logfile" and "close_logfile" 1453 try: 1454 queries = table._db._adapter._truncate(table, mode) 1455 for query in queries: 1456 self.log(query + '\n', table) 1457 self.execute(query) 1458 table._db.commit() 1459 self.log('success!\n', table) 1460 finally: 1461 pass
1462
1463 - def _update(self, tablename, query, fields):
1464 if query: 1465 if use_common_filters(query): 1466 query = self.common_filter(query, [tablename]) 1467 sql_w = ' WHERE ' + self.expand(query) 1468 else: 1469 sql_w = '' 1470 sql_v = ','.join(['%s=%s' % (field.name, 1471 self.expand(value, field.type)) \ 1472 for (field, value) in fields]) 1473 tablename = "%s" % self.db[tablename] 1474 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1475
1476 - def update(self, tablename, query, fields):
1477 sql = self._update(tablename, query, fields) 1478 try: 1479 self.execute(sql) 1480 except Exception: 1481 e = sys.exc_info()[1] 1482 table = self.db[tablename] 1483 if hasattr(table,'_on_update_error'): 1484 return table._on_update_error(table,query,fields,e) 1485 raise e 1486 try: 1487 return self.cursor.rowcount 1488 except: 1489 return None
1490
1491 - def _delete(self, tablename, query):
1492 if query: 1493 if use_common_filters(query): 1494 query = self.common_filter(query, [tablename]) 1495 sql_w = ' WHERE ' + self.expand(query) 1496 else: 1497 sql_w = '' 1498 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1499
1500 - def delete(self, tablename, query):
1501 sql = self._delete(tablename, query) 1502 ### special code to handle CASCADE in SQLite & SpatiaLite 1503 db = self.db 1504 table = db[tablename] 1505 if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by: 1506 deleted = [x[table._id.name] for x in db(query).select(table._id)] 1507 ### end special code to handle CASCADE in SQLite & SpatiaLite 1508 self.execute(sql) 1509 try: 1510 counter = self.cursor.rowcount 1511 except: 1512 counter = None 1513 ### special code to handle CASCADE in SQLite & SpatiaLite 1514 if self.dbengine in ('sqlite', 'spatialite') and counter: 1515 for field in table._referenced_by: 1516 if field.type=='reference '+table._tablename \ 1517 and field.ondelete=='CASCADE': 1518 db(field.belongs(deleted)).delete() 1519 ### end special code to handle CASCADE in SQLite & SpatiaLite 1520 return counter
1521
1522 - def get_table(self, query):
1523 tablenames = self.tables(query) 1524 if len(tablenames)==1: 1525 return tablenames[0] 1526 elif len(tablenames)<1: 1527 raise RuntimeError("No table selected") 1528 else: 1529 raise RuntimeError("Too many tables selected")
1530
1531 - def expand_all(self, fields, tablenames):
1532 db = self.db 1533 new_fields = [] 1534 append = new_fields.append 1535 for item in fields: 1536 if isinstance(item,SQLALL): 1537 new_fields += item._table 1538 elif isinstance(item,str): 1539 if REGEX_TABLE_DOT_FIELD.match(item): 1540 tablename,fieldname = item.split('.') 1541 append(db[tablename][fieldname]) 1542 else: 1543 append(Expression(db,lambda item=item:item)) 1544 else: 1545 append(item) 1546 # ## if no fields specified take them all from the requested tables 1547 if not new_fields: 1548 for table in tablenames: 1549 for field in db[table]: 1550 append(field) 1551 return new_fields
1552
1553 - def _select(self, query, fields, attributes):
1554 tables = self.tables 1555 for key in set(attributes.keys())-SELECT_ARGS: 1556 raise SyntaxError('invalid select attribute: %s' % key) 1557 args_get = attributes.get 1558 tablenames = tables(query) 1559 tablenames_for_common_filters = tablenames 1560 for field in fields: 1561 if isinstance(field, basestring) \ 1562 and REGEX_TABLE_DOT_FIELD.match(field): 1563 tn,fn = field.split('.') 1564 field = self.db[tn][fn] 1565 for tablename in tables(field): 1566 if not tablename in tablenames: 1567 tablenames.append(tablename) 1568 1569 if len(tablenames) < 1: 1570 raise SyntaxError('Set: no tables selected') 1571 self._colnames = map(self.expand, fields) 1572 def geoexpand(field): 1573 if isinstance(field.type,str) and field.type.startswith('geometry'): 1574 field = field.st_astext() 1575 return self.expand(field)
1576 sql_f = ', '.join(map(geoexpand, fields)) 1577 sql_o = '' 1578 sql_s = '' 1579 left = args_get('left', False) 1580 inner_join = args_get('join', False) 1581 distinct = args_get('distinct', False) 1582 groupby = args_get('groupby', False) 1583 orderby = args_get('orderby', False) 1584 having = args_get('having', False) 1585 limitby = args_get('limitby', False) 1586 orderby_on_limitby = args_get('orderby_on_limitby', True) 1587 for_update = args_get('for_update', False) 1588 if self.can_select_for_update is False and for_update is True: 1589 raise SyntaxError('invalid select attribute: for_update') 1590 if distinct is True: 1591 sql_s += 'DISTINCT' 1592 elif distinct: 1593 sql_s += 'DISTINCT ON (%s)' % distinct 1594 if inner_join: 1595 icommand = self.JOIN() 1596 if not isinstance(inner_join, (tuple, list)): 1597 inner_join = [inner_join] 1598 ijoint = [t._tablename for t in inner_join 1599 if not isinstance(t,Expression)] 1600 ijoinon = [t for t in inner_join if isinstance(t, Expression)] 1601 itables_to_merge={} #issue 490 1602 [itables_to_merge.update( 1603 dict.fromkeys(tables(t))) for t in ijoinon] 1604 ijoinont = [t.first._tablename for t in ijoinon] 1605 [itables_to_merge.pop(t) for t in ijoinont 1606 if t in itables_to_merge] #issue 490 1607 iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys() 1608 iexcluded = [t for t in tablenames 1609 if not t in iimportant_tablenames] 1610 if left: 1611 join = attributes['left'] 1612 command = self.LEFT_JOIN() 1613 if not isinstance(join, (tuple, list)): 1614 join = [join] 1615 joint = [t._tablename for t in join 1616 if not isinstance(t, Expression)] 1617 joinon = [t for t in join if isinstance(t, Expression)] 1618 #patch join+left patch (solves problem with ordering in left joins) 1619 tables_to_merge={} 1620 [tables_to_merge.update( 1621 dict.fromkeys(tables(t))) for t in joinon] 1622 joinont = [t.first._tablename for t in joinon] 1623 [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge] 1624 tablenames_for_common_filters = [t for t in tablenames 1625 if not t in joinont ] 1626 important_tablenames = joint + joinont + tables_to_merge.keys() 1627 excluded = [t for t in tablenames 1628 if not t in important_tablenames ] 1629 else: 1630 excluded = tablenames 1631 1632 if use_common_filters(query): 1633 query = self.common_filter(query,tablenames_for_common_filters) 1634 sql_w = ' WHERE ' + self.expand(query) if query else '' 1635 1636 if inner_join and not left: 1637 sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \ 1638 itables_to_merge.keys()]) 1639 for t in ijoinon: 1640 sql_t += ' %s %s' % (icommand, t) 1641 elif not inner_join and left: 1642 sql_t = ', '.join([self.table_alias(t) for t in excluded + \ 1643 tables_to_merge.keys()]) 1644 if joint: 1645 sql_t += ' %s %s' % (command, 1646 ','.join([self.table_alias(t) for t in joint])) 1647 for t in joinon: 1648 sql_t += ' %s %s' % (command, t) 1649 elif inner_join and left: 1650 all_tables_in_query = set(important_tablenames + \ 1651 iimportant_tablenames + \ 1652 tablenames) 1653 tables_in_joinon = set(joinont + ijoinont) 1654 tables_not_in_joinon = \ 1655 all_tables_in_query.difference(tables_in_joinon) 1656 sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon]) 1657 for t in ijoinon: 1658 sql_t += ' %s %s' % (icommand, t) 1659 if joint: 1660 sql_t += ' %s %s' % (command, 1661 ','.join([self.table_alias(t) for t in joint])) 1662 for t in joinon: 1663 sql_t += ' %s %s' % (command, t) 1664 else: 1665 sql_t = ', '.join(self.table_alias(t) for t in tablenames) 1666 if groupby: 1667 if isinstance(groupby, (list, tuple)): 1668 groupby = xorify(groupby) 1669 sql_o += ' GROUP BY %s' % self.expand(groupby) 1670 if having: 1671 sql_o += ' HAVING %s' % attributes['having'] 1672 if orderby: 1673 if isinstance(orderby, (list, tuple)): 1674 orderby = xorify(orderby) 1675 if str(orderby) == '<random>': 1676 sql_o += ' ORDER BY %s' % self.RANDOM() 1677 else: 1678 sql_o += ' ORDER BY %s' % self.expand(orderby) 1679 if limitby: 1680 if orderby_on_limitby and not orderby and tablenames: 1681 sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])]) 1682 # oracle does not support limitby 1683 sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby) 1684 if for_update and self.can_select_for_update is True: 1685 sql = sql.rstrip(';') + ' FOR UPDATE;' 1686 return sql 1687
1688 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1689 if limitby: 1690 (lmin, lmax) = limitby 1691 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1692 return 'SELECT %s %s FROM %s%s%s;' % \ 1693 (sql_s, sql_f, sql_t, sql_w, sql_o)
1694
1695 - def _fetchall(self):
1696 return self.cursor.fetchall()
1697
1698 - def _select_aux(self,sql,fields,attributes):
1699 args_get = attributes.get 1700 cache = args_get('cache',None) 1701 if not cache: 1702 self.execute(sql) 1703 rows = self._fetchall() 1704 else: 1705 (cache_model, time_expire) = cache 1706 key = self.uri + '/' + sql + '/rows' 1707 if len(key)>200: key = hashlib_md5(key).hexdigest() 1708 def _select_aux2(): 1709 self.execute(sql) 1710 return self._fetchall()
1711 rows = cache_model(key,_select_aux2,time_expire) 1712 if isinstance(rows,tuple): 1713 rows = list(rows) 1714 limitby = args_get('limitby', None) or (0,) 1715 rows = self.rowslice(rows,limitby[0],None) 1716 processor = args_get('processor',self.parse) 1717 cacheable = args_get('cacheable',False) 1718 return processor(rows,fields,self._colnames,cacheable=cacheable) 1719
1720 - def select(self, query, fields, attributes):
1721 """ 1722 Always returns a Rows object, possibly empty. 1723 """ 1724 sql = self._select(query, fields, attributes) 1725 cache = attributes.get('cache', None) 1726 if cache and attributes.get('cacheable',False): 1727 del attributes['cache'] 1728 (cache_model, time_expire) = cache 1729 key = self.uri + '/' + sql 1730 if len(key)>200: key = hashlib_md5(key).hexdigest() 1731 args = (sql,fields,attributes) 1732 return cache_model( 1733 key, 1734 lambda self=self,args=args:self._select_aux(*args), 1735 time_expire) 1736 else: 1737 return self._select_aux(sql,fields,attributes)
1738
1739 - def _count(self, query, distinct=None):
1740 tablenames = self.tables(query) 1741 if query: 1742 if use_common_filters(query): 1743 query = self.common_filter(query, tablenames) 1744 sql_w = ' WHERE ' + self.expand(query) 1745 else: 1746 sql_w = '' 1747 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1748 if distinct: 1749 if isinstance(distinct,(list, tuple)): 1750 distinct = xorify(distinct) 1751 sql_d = self.expand(distinct) 1752 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1753 (sql_d, sql_t, sql_w) 1754 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1755
1756 - def count(self, query, distinct=None):
1757 self.execute(self._count(query, distinct)) 1758 return self.cursor.fetchone()[0]
1759
1760 - def tables(self, *queries):
1761 tables = set() 1762 for query in queries: 1763 if isinstance(query, Field): 1764 tables.add(query.tablename) 1765 elif isinstance(query, (Expression, Query)): 1766 if not query.first is None: 1767 tables = tables.union(self.tables(query.first)) 1768 if not query.second is None: 1769 tables = tables.union(self.tables(query.second)) 1770 return list(tables)
1771
1772 - def commit(self):
1773 if self.connection: return self.connection.commit()
1774
1775 - def rollback(self):
1776 if self.connection: return self.connection.rollback()
1777
1778 - def close_connection(self):
1779 if self.connection: return self.connection.close()
1780
1781 - def distributed_transaction_begin(self, key):
1782 return
1783
1784 - def prepare(self, key):
1785 if self.connection: self.connection.prepare()
1786
1787 - def commit_prepared(self, key):
1788 if self.connection: self.connection.commit()
1789
1790 - def rollback_prepared(self, key):
1791 if self.connection: self.connection.rollback()
1792
1793 - def concat_add(self, tablename):
1794 return ', ADD '
1795
1796 - def constraint_name(self, table, fieldname):
1797 return '%s_%s__constraint' % (table,fieldname)
1798
1799 - def create_sequence_and_triggers(self, query, table, **args):
1800 self.execute(query)
1801
1802 - def log_execute(self, *a, **b):
1803 if not self.connection: return None 1804 command = a[0] 1805 if hasattr(self,'filter_sql_command'): 1806 command = self.filter_sql_command(command) 1807 if self.db._debug: 1808 LOGGER.debug('SQL: %s' % command) 1809 self.db._lastsql = command 1810 t0 = time.time() 1811 ret = self.cursor.execute(command, *a[1:], **b) 1812 self.db._timings.append((command,time.time()-t0)) 1813 del self.db._timings[:-TIMINGSSIZE] 1814 return ret
1815
1816 - def execute(self, *a, **b):
1817 return self.log_execute(*a, **b)
1818
1819 - def represent(self, obj, fieldtype):
1820 field_is_type = fieldtype.startswith 1821 if isinstance(obj, CALLABLETYPES): 1822 obj = obj() 1823 if isinstance(fieldtype, SQLCustomType): 1824 value = fieldtype.encoder(obj) 1825 if fieldtype.type in ('string','text', 'json'): 1826 return self.adapt(value) 1827 return value 1828 if isinstance(obj, (Expression, Field)): 1829 return str(obj) 1830 if field_is_type('list:'): 1831 if not obj: 1832 obj = [] 1833 elif not isinstance(obj, (list, tuple)): 1834 obj = [obj] 1835 if field_is_type('list:string'): 1836 obj = map(str,obj) 1837 else: 1838 obj = map(int,[o for o in obj if o != '']) 1839 # we don't want to bar_encode json objects 1840 if isinstance(obj, (list, tuple)) and (not fieldtype == "json"): 1841 obj = bar_encode(obj) 1842 if obj is None: 1843 return 'NULL' 1844 if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']: 1845 return 'NULL' 1846 r = self.represent_exceptions(obj, fieldtype) 1847 if not r is None: 1848 return r 1849 if fieldtype == 'boolean': 1850 if obj and not str(obj)[:1].upper() in '0F': 1851 return self.smart_adapt(self.TRUE) 1852 else: 1853 return self.smart_adapt(self.FALSE) 1854 if fieldtype == 'id' or fieldtype == 'integer': 1855 return str(long(obj)) 1856 if field_is_type('decimal'): 1857 return str(obj) 1858 elif field_is_type('reference'): # reference 1859 if fieldtype.find('.')>0: 1860 return repr(obj) 1861 elif isinstance(obj, (Row, Reference)): 1862 return str(obj['id']) 1863 return str(long(obj)) 1864 elif fieldtype == 'double': 1865 return repr(float(obj)) 1866 if isinstance(obj, unicode): 1867 obj = obj.encode(self.db_codec) 1868 if fieldtype == 'blob': 1869 obj = base64.b64encode(str(obj)) 1870 elif fieldtype == 'date': 1871 if isinstance(obj, (datetime.date, datetime.datetime)): 1872 obj = obj.isoformat()[:10] 1873 else: 1874 obj = str(obj) 1875 elif fieldtype == 'datetime': 1876 if isinstance(obj, datetime.datetime): 1877 obj = obj.isoformat(self.T_SEP)[:19] 1878 elif isinstance(obj, datetime.date): 1879 obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00' 1880 else: 1881 obj = str(obj) 1882 elif fieldtype == 'time': 1883 if isinstance(obj, datetime.time): 1884 obj = obj.isoformat()[:10] 1885 else: 1886 obj = str(obj) 1887 elif fieldtype == 'json': 1888 if not self.native_json: 1889 if have_serializers: 1890 obj = serializers.json(obj) 1891 elif simplejson: 1892 obj = simplejson.dumps(obj) 1893 else: 1894 raise RuntimeError("missing simplejson") 1895 if not isinstance(obj,bytes): 1896 obj = bytes(obj) 1897 try: 1898 obj.decode(self.db_codec) 1899 except: 1900 obj = obj.decode('latin1').encode(self.db_codec) 1901 return self.adapt(obj)
1902
1903 - def represent_exceptions(self, obj, fieldtype):
1904 return None
1905
1906 - def lastrowid(self, table):
1907 return None
1908
1909 - def rowslice(self, rows, minimum=0, maximum=None):
1910 """ 1911 By default this function does nothing; 1912 overload when db does not do slicing. 1913 """ 1914 return rows
1915
1916 - def parse_value(self, value, field_type, blob_decode=True):
1917 if field_type != 'blob' and isinstance(value, str): 1918 try: 1919 value = value.decode(self.db._db_codec) 1920 except Exception: 1921 pass 1922 if isinstance(value, unicode): 1923 value = value.encode('utf-8') 1924 if isinstance(field_type, SQLCustomType): 1925 value = field_type.decoder(value) 1926 if not isinstance(field_type, str) or value is None: 1927 return value 1928 elif field_type in ('string', 'text', 'password', 'upload', 'dict'): 1929 return value 1930 elif field_type.startswith('geo'): 1931 return value 1932 elif field_type == 'blob' and not blob_decode: 1933 return value 1934 else: 1935 key = REGEX_TYPE.match(field_type).group(0) 1936 return self.parsemap[key](value,field_type)
1937
1938 - def parse_reference(self, value, field_type):
1939 referee = field_type[10:].strip() 1940 if not '.' in referee: 1941 value = Reference(value) 1942 value._table, value._record = self.db[referee], None 1943 return value
1944
1945 - def parse_boolean(self, value, field_type):
1946 return value == self.TRUE or str(value)[:1].lower() == 't'
1947
1948 - def parse_date(self, value, field_type):
1949 if isinstance(value, datetime.datetime): 1950 return value.date() 1951 if not isinstance(value, (datetime.date,datetime.datetime)): 1952 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1953 value = datetime.date(y, m, d) 1954 return value
1955
1956 - def parse_time(self, value, field_type):
1957 if not isinstance(value, datetime.time): 1958 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1959 if len(time_items) == 3: 1960 (h, mi, s) = time_items 1961 else: 1962 (h, mi, s) = time_items + [0] 1963 value = datetime.time(h, mi, s) 1964 return value
1965
1966 - def parse_datetime(self, value, field_type):
1967 if not isinstance(value, datetime.datetime): 1968 value = str(value) 1969 date_part,time_part,timezone = value[:10],value[11:19],value[19:] 1970 if '+' in timezone: 1971 ms,tz = timezone.split('+') 1972 h,m = tz.split(':') 1973 dt = datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1974 elif '-' in timezone: 1975 ms,tz = timezone.split('-') 1976 h,m = tz.split(':') 1977 dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m)) 1978 else: 1979 dt = None 1980 (y, m, d) = map(int,date_part.split('-')) 1981 time_parts = time_part and time_part.split(':')[:3] or (0,0,0) 1982 while len(time_parts)<3: time_parts.append(0) 1983 time_items = map(int,time_parts) 1984 (h, mi, s) = time_items 1985 value = datetime.datetime(y, m, d, h, mi, s) 1986 if dt: 1987 value = value + dt 1988 return value
1989
1990 - def parse_blob(self, value, field_type):
1991 return base64.b64decode(str(value))
1992
1993 - def parse_decimal(self, value, field_type):
1994 decimals = int(field_type[8:-1].split(',')[-1]) 1995 if self.dbengine in ('sqlite', 'spatialite'): 1996 value = ('%.' + str(decimals) + 'f') % value 1997 if not isinstance(value, decimal.Decimal): 1998 value = decimal.Decimal(str(value)) 1999 return value
2000
2001 - def parse_list_integers(self, value, field_type):
2002 if not isinstance(self, NoSQLAdapter): 2003 value = bar_decode_integer(value) 2004 return value
2005
2006 - def parse_list_references(self, value, field_type):
2007 if not isinstance(self, NoSQLAdapter): 2008 value = bar_decode_integer(value) 2009 return [self.parse_reference(r, field_type[5:]) for r in value]
2010
2011 - def parse_list_strings(self, value, field_type):
2012 if not isinstance(self, NoSQLAdapter): 2013 value = bar_decode_string(value) 2014 return value
2015
2016 - def parse_id(self, value, field_type):
2017 return long(value)
2018
2019 - def parse_integer(self, value, field_type):
2020 return long(value)
2021
2022 - def parse_double(self, value, field_type):
2023 return float(value)
2024
2025 - def parse_json(self, value, field_type):
2026 if not self.native_json: 2027 if not isinstance(value, basestring): 2028 raise RuntimeError('json data not a string') 2029 if isinstance(value, unicode): 2030 value = value.encode('utf-8') 2031 if have_serializers: 2032 value = serializers.loads_json(value) 2033 elif simplejson: 2034 value = simplejson.loads(value) 2035 else: 2036 raise RuntimeError("missing simplejson") 2037 return value
2038
2039 - def build_parsemap(self):
2040 self.parsemap = { 2041 'id':self.parse_id, 2042 'integer':self.parse_integer, 2043 'bigint':self.parse_integer, 2044 'float':self.parse_double, 2045 'double':self.parse_double, 2046 'reference':self.parse_reference, 2047 'boolean':self.parse_boolean, 2048 'date':self.parse_date, 2049 'time':self.parse_time, 2050 'datetime':self.parse_datetime, 2051 'blob':self.parse_blob, 2052 'decimal':self.parse_decimal, 2053 'json':self.parse_json, 2054 'list:integer':self.parse_list_integers, 2055 'list:reference':self.parse_list_references, 2056 'list:string':self.parse_list_strings, 2057 }
2058
2059 - def parse(self, rows, fields, colnames, blob_decode=True, 2060 cacheable = False):
2061 self.build_parsemap() 2062 db = self.db 2063 virtualtables = [] 2064 new_rows = [] 2065 tmps = [] 2066 for colname in colnames: 2067 if not REGEX_TABLE_DOT_FIELD.match(colname): 2068 tmps.append(None) 2069 else: 2070 (tablename, fieldname) = colname.split('.') 2071 table = db[tablename] 2072 field = table[fieldname] 2073 ft = field.type 2074 tmps.append((tablename,fieldname,table,field,ft)) 2075 for (i,row) in enumerate(rows): 2076 new_row = Row() 2077 for (j,colname) in enumerate(colnames): 2078 value = row[j] 2079 tmp = tmps[j] 2080 if tmp: 2081 (tablename,fieldname,table,field,ft) = tmp 2082 if tablename in new_row: 2083 colset = new_row[tablename] 2084 else: 2085 colset = new_row[tablename] = Row() 2086 if tablename not in virtualtables: 2087 virtualtables.append(tablename) 2088 value = self.parse_value(value,ft,blob_decode) 2089 if field.filter_out: 2090 value = field.filter_out(value) 2091 colset[fieldname] = value 2092 2093 # for backward compatibility 2094 if ft=='id' and fieldname!='id' and \ 2095 not 'id' in table.fields: 2096 colset['id'] = value 2097 2098 if ft == 'id' and not cacheable: 2099 # temporary hack to deal with 2100 # GoogleDatastoreAdapter 2101 # references 2102 if isinstance(self, GoogleDatastoreAdapter): 2103 id = value.key().id_or_name() 2104 colset[fieldname] = id 2105 colset.gae_item = value 2106 else: 2107 id = value 2108 colset.update_record = RecordUpdater(colset,table,id) 2109 colset.delete_record = RecordDeleter(table,id) 2110 for rfield in table._referenced_by: 2111 referee_link = db._referee_name and \ 2112 db._referee_name % dict( 2113 table=rfield.tablename,field=rfield.name) 2114 if referee_link and not referee_link in colset: 2115 colset[referee_link] = LazySet(rfield,id) 2116 else: 2117 if not '_extra' in new_row: 2118 new_row['_extra'] = Row() 2119 new_row['_extra'][colname] = \ 2120 self.parse_value(value, 2121 fields[j].type,blob_decode) 2122 new_column_name = \ 2123 REGEX_SELECT_AS_PARSER.search(colname) 2124 if not new_column_name is None: 2125 column_name = new_column_name.groups(0) 2126 setattr(new_row,column_name[0],value) 2127 new_rows.append(new_row) 2128 rowsobj = Rows(db, new_rows, colnames, rawrows=rows) 2129 2130 2131 for tablename in virtualtables: 2132 table = db[tablename] 2133 fields_virtual = [(f,v) for (f,v) in table.iteritems() 2134 if isinstance(v,FieldVirtual)] 2135 fields_lazy = [(f,v) for (f,v) in table.iteritems() 2136 if isinstance(v,FieldMethod)] 2137 if fields_virtual or fields_lazy: 2138 for row in rowsobj.records: 2139 box = row[tablename] 2140 for f,v in fields_virtual: 2141 box[f] = v.f(row) 2142 for f,v in fields_lazy: 2143 box[f] = (v.handler or VirtualCommand)(v.f,row) 2144 2145 ### old style virtual fields 2146 for item in table.virtualfields: 2147 try: 2148 rowsobj = rowsobj.setvirtualfields(**{tablename:item}) 2149 except (KeyError, AttributeError): 2150 # to avoid breaking virtualfields when partial select 2151 pass 2152 return rowsobj
2153
2154 - def common_filter(self, query, tablenames):
2155 tenant_fieldname = self.db._request_tenant 2156 2157 for tablename in tablenames: 2158 table = self.db[tablename] 2159 2160 # deal with user provided filters 2161 if table._common_filter != None: 2162 query = query & table._common_filter(query) 2163 2164 # deal with multi_tenant filters 2165 if tenant_fieldname in table: 2166 default = table[tenant_fieldname].default 2167 if not default is None: 2168 newquery = table[tenant_fieldname] == default 2169 if query is None: 2170 query = newquery 2171 else: 2172 query = query & newquery 2173 return query
2174
2175 - def CASE(self,query,t,f):
2176 def represent(x): 2177 types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} 2178 if x is None: return 'NULL' 2179 elif isinstance(x,Expression): return str(x) 2180 else: return self.represent(x,types.get(type(x),'string'))
2181 return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ 2182 (self.expand(query),represent(t),represent(f))) 2183
2184 ################################################################################### 2185 # List of all the available adapters; they all extend BaseAdapter. 2186 ################################################################################### 2187 2188 -class SQLiteAdapter(BaseAdapter):
2189 drivers = ('sqlite2','sqlite3') 2190 2191 can_select_for_update = None # support ourselves with BEGIN TRANSACTION 2192
2193 - def EXTRACT(self,field,what):
2194 return "web2py_extract('%s',%s)" % (what, self.expand(field))
2195 2196 @staticmethod
2197 - def web2py_extract(lookup, s):
2198 table = { 2199 'year': (0, 4), 2200 'month': (5, 7), 2201 'day': (8, 10), 2202 'hour': (11, 13), 2203 'minute': (14, 16), 2204 'second': (17, 19), 2205 } 2206 try: 2207 if lookup != 'epoch': 2208 (i, j) = table[lookup] 2209 return int(s[i:j]) 2210 else: 2211 return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple()) 2212 except: 2213 return None
2214 2215 @staticmethod
2216 - def web2py_regexp(expression, item):
2217 return re.compile(expression).search(item) is not None
2218
2219 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', 2220 credential_decoder=IDENTITY, driver_args={}, 2221 adapter_args={}, do_connect=True, after_connection=None):
2222 self.db = db 2223 self.dbengine = "sqlite" 2224 self.uri = uri 2225 if do_connect: self.find_driver(adapter_args) 2226 self.pool_size = 0 2227 self.folder = folder 2228 self.db_codec = db_codec 2229 self._after_connection = after_connection 2230 self.find_or_make_work_folder() 2231 path_encoding = sys.getfilesystemencoding() \ 2232 or locale.getdefaultlocale()[1] or 'utf8' 2233 if uri.startswith('sqlite:memory'): 2234 self.dbpath = ':memory:' 2235 else: 2236 self.dbpath = uri.split('://',1)[1] 2237 if self.dbpath[0] != '/': 2238 if PYTHON_VERSION == 2: 2239 self.dbpath = pjoin( 2240 self.folder.decode(path_encoding).encode('utf8'), self.dbpath) 2241 else: 2242 self.dbpath = pjoin(self.folder, self.dbpath) 2243 if not 'check_same_thread' in driver_args: 2244 driver_args['check_same_thread'] = False 2245 if not 'detect_types' in driver_args and do_connect: 2246 driver_args['detect_types'] = self.driver.PARSE_DECLTYPES 2247 def connector(dbpath=self.dbpath, driver_args=driver_args): 2248 return self.driver.Connection(dbpath, **driver_args)
2249 self.connector = connector 2250 if do_connect: self.reconnect()
2251
2252 - def after_connection(self):
2253 self.connection.create_function('web2py_extract', 2, 2254 SQLiteAdapter.web2py_extract) 2255 self.connection.create_function("REGEXP", 2, 2256 SQLiteAdapter.web2py_regexp)
2257
2258 - def _truncate(self, table, mode=''):
2259 tablename = table._tablename 2260 return ['DELETE FROM %s;' % tablename, 2261 "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
2262
2263 - def lastrowid(self, table):
2264 return self.cursor.lastrowid
2265
2266 - def REGEXP(self,first,second):
2267 return '(%s REGEXP %s)' % (self.expand(first), 2268 self.expand(second,'string'))
2269
2270 - def select(self, query, fields, attributes):
2271 """ 2272 Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION. 2273 Note that the entire database, rather than one record, is locked 2274 (it will be locked eventually anyway by the following UPDATE). 2275 """ 2276 if attributes.get('for_update', False) and not 'cache' in attributes: 2277 self.execute('BEGIN IMMEDIATE TRANSACTION;') 2278 return super(SQLiteAdapter, self).select(query, fields, attributes)
2279
2280 -class SpatiaLiteAdapter(SQLiteAdapter):
2281 drivers = ('sqlite3','sqlite2') 2282 2283 types = copy.copy(BaseAdapter.types) 2284 types.update(geometry='GEOMETRY') 2285
2286 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', 2287 credential_decoder=IDENTITY, driver_args={}, 2288 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
2289 self.db = db 2290 self.dbengine = "spatialite" 2291 self.uri = uri 2292 if do_connect: self.find_driver(adapter_args) 2293 self.pool_size = 0 2294 self.folder = folder 2295 self.db_codec = db_codec 2296 self._after_connection = after_connection 2297 self.find_or_make_work_folder() 2298 self.srid = srid 2299 path_encoding = sys.getfilesystemencoding() \ 2300 or locale.getdefaultlocale()[1] or 'utf8' 2301 if uri.startswith('spatialite:memory'): 2302 self.dbpath = ':memory:' 2303 else: 2304 self.dbpath = uri.split('://',1)[1] 2305 if self.dbpath[0] != '/': 2306 self.dbpath = pjoin( 2307 self.folder.decode(path_encoding).encode('utf8'), self.dbpath) 2308 if not 'check_same_thread' in driver_args: 2309 driver_args['check_same_thread'] = False 2310 if not 'detect_types' in driver_args and do_connect: 2311 driver_args['detect_types'] = self.driver.PARSE_DECLTYPES 2312 def connector(dbpath=self.dbpath, driver_args=driver_args): 2313 return self.driver.Connection(dbpath, **driver_args)
2314 self.connector = connector 2315 if do_connect: self.reconnect()
2316
2317 - def after_connection(self):
2318 self.connection.enable_load_extension(True) 2319 # for Windows, rename libspatialite-2.dll to libspatialite.dll 2320 # Linux uses libspatialite.so 2321 # Mac OS X uses libspatialite.dylib 2322 libspatialite = SPATIALLIBS[platform.system()] 2323 self.execute(r'SELECT load_extension("%s");' % libspatialite) 2324 2325 self.connection.create_function('web2py_extract', 2, 2326 SQLiteAdapter.web2py_extract) 2327 self.connection.create_function("REGEXP", 2, 2328 SQLiteAdapter.web2py_regexp)
2329 2330 # GIS functions 2331
2332 - def ST_ASGEOJSON(self, first, second):
2333 return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first), 2334 second['precision'], second['options'])
2335
2336 - def ST_ASTEXT(self, first):
2337 return 'AsText(%s)' %(self.expand(first))
2338
2339 - def ST_CONTAINS(self, first, second):
2340 return 'Contains(%s,%s)' %(self.expand(first), 2341 self.expand(second, first.type))
2342
2343 - def ST_DISTANCE(self, first, second):
2344 return 'Distance(%s,%s)' %(self.expand(first), 2345 self.expand(second, first.type))
2346
2347 - def ST_EQUALS(self, first, second):
2348 return 'Equals(%s,%s)' %(self.expand(first), 2349 self.expand(second, first.type))
2350
2351 - def ST_INTERSECTS(self, first, second):
2352 return 'Intersects(%s,%s)' %(self.expand(first), 2353 self.expand(second, first.type))
2354
2355 - def ST_OVERLAPS(self, first, second):
2356 return 'Overlaps(%s,%s)' %(self.expand(first), 2357 self.expand(second, first.type))
2358
2359 - def ST_SIMPLIFY(self, first, second):
2360 return 'Simplify(%s,%s)' %(self.expand(first), 2361 self.expand(second, 'double'))
2362
2363 - def ST_TOUCHES(self, first, second):
2364 return 'Touches(%s,%s)' %(self.expand(first), 2365 self.expand(second, first.type))
2366
2367 - def ST_WITHIN(self, first, second):
2368 return 'Within(%s,%s)' %(self.expand(first), 2369 self.expand(second, first.type))
2370
2371 - def represent(self, obj, fieldtype):
2372 field_is_type = fieldtype.startswith 2373 if field_is_type('geo'): 2374 srid = 4326 # Spatialite default srid for geometry 2375 geotype, parms = fieldtype[:-1].split('(') 2376 parms = parms.split(',') 2377 if len(parms) >= 2: 2378 schema, srid = parms[:2] 2379 # if field_is_type('geometry'): 2380 value = "ST_GeomFromText('%s',%s)" %(obj, srid) 2381 # elif field_is_type('geography'): 2382 # value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj) 2383 # else: 2384 # raise SyntaxError, 'Invalid field type %s' %fieldtype 2385 return value 2386 return BaseAdapter.represent(self, obj, fieldtype)
2387
2388 2389 -class JDBCSQLiteAdapter(SQLiteAdapter):
2390 drivers = ('zxJDBC_sqlite',) 2391
2392 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', 2393 credential_decoder=IDENTITY, driver_args={}, 2394 adapter_args={}, do_connect=True, after_connection=None):
2395 self.db = db 2396 self.dbengine = "sqlite" 2397 self.uri = uri 2398 if do_connect: self.find_driver(adapter_args) 2399 self.pool_size = pool_size 2400 self.folder = folder 2401 self.db_codec = db_codec 2402 self._after_connection = after_connection 2403 self.find_or_make_work_folder() 2404 path_encoding = sys.getfilesystemencoding() \ 2405 or locale.getdefaultlocale()[1] or 'utf8' 2406 if uri.startswith('sqlite:memory'): 2407 self.dbpath = ':memory:' 2408 else: 2409 self.dbpath = uri.split('://',1)[1] 2410 if self.dbpath[0] != '/': 2411 self.dbpath = pjoin( 2412 self.folder.decode(path_encoding).encode('utf8'), self.dbpath) 2413 def connector(dbpath=self.dbpath,driver_args=driver_args): 2414 return self.driver.connect( 2415 self.driver.getConnection('jdbc:sqlite:'+dbpath), 2416 **driver_args)
2417 self.connector = connector 2418 if do_connect: self.reconnect()
2419
2420 - def after_connection(self):
2421 # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs 2422 self.connection.create_function('web2py_extract', 2, 2423 SQLiteAdapter.web2py_extract)
2424
2425 - def execute(self, a):
2426 return self.log_execute(a)
2427
2428 2429 -class MySQLAdapter(BaseAdapter):
2430 drivers = ('MySQLdb','pymysql') 2431 2432 commit_on_alter_table = True 2433 support_distributed_transaction = True 2434 types = { 2435 'boolean': 'CHAR(1)', 2436 'string': 'VARCHAR(%(length)s)', 2437 'text': 'LONGTEXT', 2438 'json': 'LONGTEXT', 2439 'password': 'VARCHAR(%(length)s)', 2440 'blob': 'LONGBLOB', 2441 'upload': 'VARCHAR(%(length)s)', 2442 'integer': 'INT', 2443 'bigint': 'BIGINT', 2444 'float': 'FLOAT', 2445 'double': 'DOUBLE', 2446 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2447 'date': 'DATE', 2448 'time': 'TIME', 2449 'datetime': 'DATETIME', 2450 'id': 'INT AUTO_INCREMENT NOT NULL', 2451 'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2452 'list:integer': 'LONGTEXT', 2453 'list:string': 'LONGTEXT', 2454 'list:reference': 'LONGTEXT', 2455 'big-id': 'BIGINT AUTO_INCREMENT NOT NULL', 2456 'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2457 } 2458 2459 QUOTE_TEMPLATE = "`%s`" 2460
2461 - def varquote(self,name):
2462 return varquote_aux(name,'`%s`')
2463
2464 - def RANDOM(self):
2465 return 'RAND()'
2466
2467 - def SUBSTRING(self,field,parameters):
2468 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), 2469 parameters[0], parameters[1])
2470
2471 - def EPOCH(self, first):
2472 return "UNIX_TIMESTAMP(%s)" % self.expand(first)
2473
2474 - def CONCAT(self, *items):
2475 return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)
2476
2477 - def REGEXP(self,first,second):
2478 return '(%s REGEXP %s)' % (self.expand(first), 2479 self.expand(second,'string'))
2480
2481 - def _drop(self,table,mode):
2482 # breaks db integrity but without this mysql does not drop table 2483 return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table, 2484 'SET FOREIGN_KEY_CHECKS=1;']
2485
2486 - def _insert_empty(self, table):
2487 return 'INSERT INTO %s VALUES (DEFAULT);' % table
2488
2489 - def distributed_transaction_begin(self,key):
2490 self.execute('XA START;')
2491
2492 - def prepare(self,key):
2493 self.execute("XA END;") 2494 self.execute("XA PREPARE;")
2495
2496 - def commit_prepared(self,ley):
2497 self.execute("XA COMMIT;")
2498
2499 - def rollback_prepared(self,key):
2500 self.execute("XA ROLLBACK;")
2501 2502 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$') 2503
2504 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2505 credential_decoder=IDENTITY, driver_args={}, 2506 adapter_args={}, do_connect=True, after_connection=None):
2507 self.db = db 2508 self.dbengine = "mysql" 2509 self.uri = uri 2510 if do_connect: self.find_driver(adapter_args,uri) 2511 self.pool_size = pool_size 2512 self.folder = folder 2513 self.db_codec = db_codec 2514 self._after_connection = after_connection 2515 self.find_or_make_work_folder() 2516 ruri = uri.split('://',1)[1] 2517 m = self.REGEX_URI.match(ruri) 2518 if not m: 2519 raise SyntaxError( 2520 "Invalid URI string in DAL: %s" % self.uri) 2521 user = credential_decoder(m.group('user')) 2522 if not user: 2523 raise SyntaxError('User required') 2524 password = credential_decoder(m.group('password')) 2525 if not password: 2526 password = '' 2527 host = m.group('host') 2528 if not host: 2529 raise SyntaxError('Host name required') 2530 db = m.group('db') 2531 if not db: 2532 raise SyntaxError('Database name required') 2533 port = int(m.group('port') or '3306') 2534 charset = m.group('charset') or 'utf8' 2535 driver_args.update(db=db, 2536 user=credential_decoder(user), 2537 passwd=credential_decoder(password), 2538 host=host, 2539 port=port, 2540 charset=charset) 2541 2542 2543 def connector(driver_args=driver_args): 2544 return self.driver.connect(**driver_args)
2545 self.connector = connector 2546 if do_connect: self.reconnect()
2547
2548 - def after_connection(self):
2549 self.execute('SET FOREIGN_KEY_CHECKS=1;') 2550 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
2551
2552 - def lastrowid(self,table):
2553 self.execute('select last_insert_id();') 2554 return int(self.cursor.fetchone()[0])
2555
2556 2557 -class PostgreSQLAdapter(BaseAdapter):
2558 drivers = ('psycopg2','pg8000') 2559 2560 support_distributed_transaction = True 2561 types = { 2562 'boolean': 'CHAR(1)', 2563 'string': 'VARCHAR(%(length)s)', 2564 'text': 'TEXT', 2565 'json': 'TEXT', 2566 'password': 'VARCHAR(%(length)s)', 2567 'blob': 'BYTEA', 2568 'upload': 'VARCHAR(%(length)s)', 2569 'integer': 'INTEGER', 2570 'bigint': 'BIGINT', 2571 'float': 'FLOAT', 2572 'double': 'FLOAT8', 2573 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2574 'date': 'DATE', 2575 'time': 'TIME', 2576 'datetime': 'TIMESTAMP', 2577 'id': 'SERIAL PRIMARY KEY', 2578 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2579 'list:integer': 'TEXT', 2580 'list:string': 'TEXT', 2581 'list:reference': 'TEXT', 2582 'geometry': 'GEOMETRY', 2583 'geography': 'GEOGRAPHY', 2584 'big-id': 'BIGSERIAL PRIMARY KEY', 2585 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2586 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2587 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2588 2589 } 2590 2591 QUOTE_TEMPLATE = '%s' 2592
2593 - def varquote(self,name):
2594 return varquote_aux(name,'"%s"')
2595
2596 - def adapt(self,obj):
2597 if self.driver_name == 'psycopg2': 2598 return psycopg2_adapt(obj).getquoted() 2599 elif self.driver_name == 'pg8000': 2600 return "'%s'" % str(obj).replace("%","%%").replace("'","''") 2601 else: 2602 return "'%s'" % str(obj).replace("'","''")
2603
2604 - def sequence_name(self,table):
2605 return '%s_id_Seq' % table
2606
2607 - def RANDOM(self):
2608 return 'RANDOM()'
2609
2610 - def ADD(self, first, second):
2611 t = first.type 2612 if t in ('text','string','password', 'json', 'upload','blob'): 2613 return '(%s || %s)' % (self.expand(first), self.expand(second, t)) 2614 else: 2615 return '(%s + %s)' % (self.expand(first), self.expand(second, t))
2616
2617 - def distributed_transaction_begin(self,key):
2618 return
2619
2620 - def prepare(self,key):
2621 self.execute("PREPARE TRANSACTION '%s';" % key)
2622
2623 - def commit_prepared(self,key):
2624 self.execute("COMMIT PREPARED '%s';" % key)
2625
2626 - def rollback_prepared(self,key):
2627 self.execute("ROLLBACK PREPARED '%s';" % key)
2628
2629 - def create_sequence_and_triggers(self, query, table, **args):
2630 # following lines should only be executed if table._sequence_name does not exist 2631 # self.execute('CREATE SEQUENCE %s;' % table._sequence_name) 2632 # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ 2633 # % (table._tablename, table._fieldname, table._sequence_name)) 2634 self.execute(query)
2635 2636 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$') 2637
2638 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2639 credential_decoder=IDENTITY, driver_args={}, 2640 adapter_args={}, do_connect=True, srid=4326, 2641 after_connection=None):
2642 self.db = db 2643 self.dbengine = "postgres" 2644 self.uri = uri 2645 if do_connect: self.find_driver(adapter_args,uri) 2646 self.pool_size = pool_size 2647 self.folder = folder 2648 self.db_codec = db_codec 2649 self._after_connection = after_connection 2650 self.srid = srid 2651 self.find_or_make_work_folder() 2652 ruri = uri.split('://',1)[1] 2653 m = self.REGEX_URI.match(ruri) 2654 if not m: 2655 raise SyntaxError("Invalid URI string in DAL") 2656 user = credential_decoder(m.group('user')) 2657 if not user: 2658 raise SyntaxError('User required') 2659 password = credential_decoder(m.group('password')) 2660 if not password: 2661 password = '' 2662 host = m.group('host') 2663 if not host: 2664 raise SyntaxError('Host name required') 2665 db = m.group('db') 2666 if not db: 2667 raise SyntaxError('Database name required') 2668 port = m.group('port') or '5432' 2669 sslmode = m.group('sslmode') 2670 if sslmode: 2671 msg = ("dbname='%s' user='%s' host='%s' " 2672 "port=%s password='%s' sslmode='%s'") \ 2673 % (db, user, host, port, password, sslmode) 2674 else: 2675 msg = ("dbname='%s' user='%s' host='%s' " 2676 "port=%s password='%s'") \ 2677 % (db, user, host, port, password) 2678 # choose diver according uri 2679 if self.driver: 2680 self.__version__ = "%s %s" % (self.driver.__name__, 2681 self.driver.__version__) 2682 else: 2683 self.__version__ = None 2684 def connector(msg=msg,driver_args=driver_args): 2685 return self.driver.connect(msg,**driver_args)
2686 self.connector = connector 2687 if do_connect: self.reconnect()
2688
2689 - def after_connection(self):
2690 self.connection.set_client_encoding('UTF8') 2691 self.execute("SET standard_conforming_strings=on;") 2692 self.try_json()
2693
2694 - def lastrowid(self,table):
2695 self.execute("select currval('%s')" % table._sequence_name) 2696 return int(self.cursor.fetchone()[0])
2697
2698 - def try_json(self):
2699 # check JSON data type support 2700 # (to be added to after_connection) 2701 if self.driver_name == "pg8000": 2702 supports_json = self.connection.server_version >= "9.2.0" 2703 elif (self.driver_name == "psycopg2") and \ 2704 (self.driver.__version__ >= "2.0.12"): 2705 supports_json = self.connection.server_version >= 90200 2706 elif self.driver_name == "zxJDBC": 2707 supports_json = self.connection.dbversion >= "9.2.0" 2708 else: supports_json = None 2709 if supports_json: 2710 self.types["json"] = "JSON" 2711 self.native_json = True 2712 else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")
2713
2714 - def LIKE(self,first,second):
2715 args = (self.expand(first), self.expand(second,'string')) 2716 if not first.type in ('string', 'text', 'json'): 2717 return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1]) 2718 else: 2719 return '(%s LIKE %s)' % args
2720
2721 - def ILIKE(self,first,second):
2722 args = (self.expand(first), self.expand(second,'string')) 2723 if not first.type in ('string', 'text', 'json'): 2724 return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1]) 2725 else: 2726 return '(%s ILIKE %s)' % args
2727
2728 - def REGEXP(self,first,second):
2729 return '(%s ~ %s)' % (self.expand(first), 2730 self.expand(second,'string'))
2731
2732 - def STARTSWITH(self,first,second):
2733 return '(%s ILIKE %s)' % (self.expand(first), 2734 self.expand(second+'%','string'))
2735
2736 - def ENDSWITH(self,first,second):
2737 return '(%s ILIKE %s)' % (self.expand(first), 2738 self.expand('%'+second,'string'))
2739 2740 # GIS functions 2741
2742 - def ST_ASGEOJSON(self, first, second):
2743 """ 2744 http://postgis.org/docs/ST_AsGeoJSON.html 2745 """ 2746 return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'], 2747 self.expand(first), second['precision'], second['options'])
2748
2749 - def ST_ASTEXT(self, first):
2750 """ 2751 http://postgis.org/docs/ST_AsText.html 2752 """ 2753 return 'ST_AsText(%s)' %(self.expand(first))
2754
2755 - def ST_X(self, first):
2756 """ 2757 http://postgis.org/docs/ST_X.html 2758 """ 2759 return 'ST_X(%s)' %(self.expand(first))
2760
2761 - def ST_Y(self, first):
2762 """ 2763 http://postgis.org/docs/ST_Y.html 2764 """ 2765 return 'ST_Y(%s)' %(self.expand(first))
2766
2767 - def ST_CONTAINS(self, first, second):
2768 """ 2769 http://postgis.org/docs/ST_Contains.html 2770 """ 2771 return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2772
2773 - def ST_DISTANCE(self, first, second):
2774 """ 2775 http://postgis.org/docs/ST_Distance.html 2776 """ 2777 return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2778
2779 - def ST_EQUALS(self, first, second):
2780 """ 2781 http://postgis.org/docs/ST_Equals.html 2782 """ 2783 return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2784
2785 - def ST_INTERSECTS(self, first, second):
2786 """ 2787 http://postgis.org/docs/ST_Intersects.html 2788 """ 2789 return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2790
2791 - def ST_OVERLAPS(self, first, second):
2792 """ 2793 http://postgis.org/docs/ST_Overlaps.html 2794 """ 2795 return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2796
2797 - def ST_SIMPLIFY(self, first, second):
2798 """ 2799 http://postgis.org/docs/ST_Simplify.html 2800 """ 2801 return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))
2802
2803 - def ST_TOUCHES(self, first, second):
2804 """ 2805 http://postgis.org/docs/ST_Touches.html 2806 """ 2807 return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2808
2809 - def ST_WITHIN(self, first, second):
2810 """ 2811 http://postgis.org/docs/ST_Within.html 2812 """ 2813 return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))
2814
2815 - def represent(self, obj, fieldtype):
2816 field_is_type = fieldtype.startswith 2817 if field_is_type('geo'): 2818 srid = 4326 # postGIS default srid for geometry 2819 geotype, parms = fieldtype[:-1].split('(') 2820 parms = parms.split(',') 2821 if len(parms) >= 2: 2822 schema, srid = parms[:2] 2823 if field_is_type('geometry'): 2824 value = "ST_GeomFromText('%s',%s)" %(obj, srid) 2825 elif field_is_type('geography'): 2826 value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj) 2827 # else: 2828 # raise SyntaxError('Invalid field type %s' %fieldtype) 2829 return value 2830 return BaseAdapter.represent(self, obj, fieldtype)
2831
2832 -class NewPostgreSQLAdapter(PostgreSQLAdapter):
2833 drivers = ('psycopg2','pg8000') 2834 2835 types = { 2836 'boolean': 'CHAR(1)', 2837 'string': 'VARCHAR(%(length)s)', 2838 'text': 'TEXT', 2839 'json': 'TEXT', 2840 'password': 'VARCHAR(%(length)s)', 2841 'blob': 'BYTEA', 2842 'upload': 'VARCHAR(%(length)s)', 2843 'integer': 'INTEGER', 2844 'bigint': 'BIGINT', 2845 'float': 'FLOAT', 2846 'double': 'FLOAT8', 2847 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2848 'date': 'DATE', 2849 'time': 'TIME', 2850 'datetime': 'TIMESTAMP', 2851 'id': 'SERIAL PRIMARY KEY', 2852 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2853 'list:integer': 'BIGINT[]', 2854 'list:string': 'TEXT[]', 2855 'list:reference': 'BIGINT[]', 2856 'geometry': 'GEOMETRY', 2857 'geography': 'GEOGRAPHY', 2858 'big-id': 'BIGSERIAL PRIMARY KEY', 2859 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2860 } 2861
2862 - def parse_list_integers(self, value, field_type):
2863 return value
2864
2865 - def parse_list_references(self, value, field_type):
2866 return [self.parse_reference(r, field_type[5:]) for r in value]
2867
2868 - def parse_list_strings(self, value, field_type):
2869 return value
2870
2871 - def represent(self, obj, fieldtype):
2872 field_is_type = fieldtype.startswith 2873 if field_is_type('list:'): 2874 if not obj: 2875 obj = [] 2876 elif not isinstance(obj, (list, tuple)): 2877 obj = [obj] 2878 if field_is_type('list:string'): 2879 obj = map(str,obj) 2880 else: 2881 obj = map(int,obj) 2882 return 'ARRAY[%s]' % ','.join(repr(item) for item in obj) 2883 return BaseAdapter.represent(self, obj, fieldtype)
2884
2885 2886 -class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
2887 drivers = ('zxJDBC',) 2888 2889 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$') 2890
2891 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 2892 credential_decoder=IDENTITY, driver_args={}, 2893 adapter_args={}, do_connect=True, after_connection=None ):
2894 self.db = db 2895 self.dbengine = "postgres" 2896 self.uri = uri 2897 if do_connect: self.find_driver(adapter_args,uri) 2898 self.pool_size = pool_size 2899 self.folder = folder 2900 self.db_codec = db_codec 2901 self._after_connection = after_connection 2902 self.find_or_make_work_folder() 2903 ruri = uri.split('://',1)[1] 2904 m = self.REGEX_URI.match(ruri) 2905 if not m: 2906 raise SyntaxError("Invalid URI string in DAL") 2907 user = credential_decoder(m.group('user')) 2908 if not user: 2909 raise SyntaxError('User required') 2910 password = credential_decoder(m.group('password')) 2911 if not password: 2912 password = '' 2913 host = m.group('host') 2914 if not host: 2915 raise SyntaxError('Host name required') 2916 db = m.group('db') 2917 if not db: 2918 raise SyntaxError('Database name required') 2919 port = m.group('port') or '5432' 2920 msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password) 2921 def connector(msg=msg,driver_args=driver_args): 2922 return self.driver.connect(*msg,**driver_args)
2923 self.connector = connector 2924 if do_connect: self.reconnect()
2925
2926 - def after_connection(self):
2927 self.connection.set_client_encoding('UTF8') 2928 self.execute('BEGIN;') 2929 self.execute("SET CLIENT_ENCODING TO 'UNICODE';") 2930 self.try_json()
2931
2932 2933 -class OracleAdapter(BaseAdapter):
2934 drivers = ('cx_Oracle',) 2935 2936 commit_on_alter_table = False 2937 types = { 2938 'boolean': 'CHAR(1)', 2939 'string': 'VARCHAR2(%(length)s)', 2940 'text': 'CLOB', 2941 'json': 'CLOB', 2942 'password': 'VARCHAR2(%(length)s)', 2943 'blob': 'CLOB', 2944 'upload': 'VARCHAR2(%(length)s)', 2945 'integer': 'INT', 2946 'bigint': 'NUMBER', 2947 'float': 'FLOAT', 2948 'double': 'BINARY_DOUBLE', 2949 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 2950 'date': 'DATE', 2951 'time': 'CHAR(8)', 2952 'datetime': 'DATE', 2953 'id': 'NUMBER PRIMARY KEY', 2954 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2955 'list:integer': 'CLOB', 2956 'list:string': 'CLOB', 2957 'list:reference': 'CLOB', 2958 'big-id': 'NUMBER PRIMARY KEY', 2959 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2960 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 2961 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 2962 } 2963
2964 - def sequence_name(self,tablename):
2965 return '%s_sequence' % tablename
2966
2967 - def trigger_name(self,tablename):
2968 return '%s_trigger' % tablename
2969
2970 - def LEFT_JOIN(self):
2971 return 'LEFT OUTER JOIN'
2972
2973 - def RANDOM(self):
2974 return 'dbms_random.value'
2975
2976 - def NOT_NULL(self,default,field_type):
2977 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
2978
2979 - def _drop(self,table,mode):
2980 sequence_name = table._sequence_name 2981 return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]
2982
2983 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2984 if limitby: 2985 (lmin, lmax) = limitby 2986 if len(sql_w) > 1: 2987 sql_w_row = sql_w + ' AND w_row > %i' % lmin 2988 else: 2989 sql_w_row = 'WHERE w_row > %i' % lmin 2990 return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) 2991 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2992
2993 - def constraint_name(self, tablename, fieldname):
2994 constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname) 2995 if len(constraint_name)>30: 2996 constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7]) 2997 return constraint_name
2998
2999 - def represent_exceptions(self, obj, fieldtype):
3000 if fieldtype == 'blob': 3001 obj = base64.b64encode(str(obj)) 3002 return ":CLOB('%s')" % obj 3003 elif fieldtype == 'date': 3004 if isinstance(obj, (datetime.date, datetime.datetime)): 3005 obj = obj.isoformat()[:10] 3006 else: 3007 obj = str(obj) 3008 return "to_date('%s','yyyy-mm-dd')" % obj 3009 elif fieldtype == 'datetime': 3010 if isinstance(obj, datetime.datetime): 3011 obj = obj.isoformat()[:19].replace('T',' ') 3012 elif isinstance(obj, datetime.date): 3013 obj = obj.isoformat()[:10]+' 00:00:00' 3014 else: 3015 obj = str(obj) 3016 return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj 3017 return None
3018
3019 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3020 credential_decoder=IDENTITY, driver_args={}, 3021 adapter_args={}, do_connect=True, after_connection=None):
3022 self.db = db 3023 self.dbengine = "oracle" 3024 self.uri = uri 3025 if do_connect: self.find_driver(adapter_args,uri) 3026 self.pool_size = pool_size 3027 self.folder = folder 3028 self.db_codec = db_codec 3029 self._after_connection = after_connection 3030 self.find_or_make_work_folder() 3031 ruri = uri.split('://',1)[1] 3032 if not 'threaded' in driver_args: 3033 driver_args['threaded']=True 3034 def connector(uri=ruri,driver_args=driver_args): 3035 return self.driver.connect(uri,**driver_args)
3036 self.connector = connector 3037 if do_connect: self.reconnect()
3038
3039 - def after_connection(self):
3040 self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") 3041 self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
3042 3043 oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))") 3044
3045 - def execute(self, command, args=None):
3046 args = args or [] 3047 i = 1 3048 while True: 3049 m = self.oracle_fix.match(command) 3050 if not m: 3051 break 3052 command = command[:m.start('clob')] + str(i) + command[m.end('clob'):] 3053 args.append(m.group('clob')[6:-2].replace("''", "'")) 3054 i += 1 3055 if command[-1:]==';': 3056 command = command[:-1] 3057 return self.log_execute(command, args)
3058
3059 - def create_sequence_and_triggers(self, query, table, **args):
3060 tablename = table._tablename 3061 id_name = table._id.name 3062 sequence_name = table._sequence_name 3063 trigger_name = table._trigger_name 3064 self.execute(query) 3065 self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name) 3066 self.execute(""" 3067 CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW 3068 DECLARE 3069 curr_val NUMBER; 3070 diff_val NUMBER; 3071 PRAGMA autonomous_transaction; 3072 BEGIN 3073 IF :NEW.%(id)s IS NOT NULL THEN 3074 EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; 3075 diff_val := :NEW.%(id)s - curr_val - 1; 3076 IF diff_val != 0 THEN 3077 EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val; 3078 EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; 3079 EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1'; 3080 END IF; 3081 END IF; 3082 SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL; 3083 END; 3084 """ % dict(trigger_name=trigger_name, tablename=tablename, 3085 sequence_name=sequence_name,id=id_name))
3086
3087 - def lastrowid(self,table):
3088 sequence_name = table._sequence_name 3089 self.execute('SELECT %s.currval FROM dual;' % sequence_name) 3090 return long(self.cursor.fetchone()[0])
3091 3092 #def parse_value(self, value, field_type, blob_decode=True): 3093 # if blob_decode and isinstance(value, cx_Oracle.LOB): 3094 # try: 3095 # value = value.read() 3096 # except self.driver.ProgrammingError: 3097 # # After a subsequent fetch the LOB value is not valid anymore 3098 # pass 3099 # return BaseAdapter.parse_value(self, value, field_type, blob_decode) 3100
3101 - def _fetchall(self):
3102 if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description): 3103 return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \ 3104 for c in r]) for r in self.cursor] 3105 else: 3106 return self.cursor.fetchall()
3107
3108 -class MSSQLAdapter(BaseAdapter):
3109 drivers = ('pyodbc',) 3110 T_SEP = 'T' 3111 3112 QUOTE_TEMPLATE = "[%s]" 3113 3114 types = { 3115 'boolean': 'BIT', 3116 'string': 'VARCHAR(%(length)s)', 3117 'text': 'TEXT', 3118 'json': 'TEXT', 3119 'password': 'VARCHAR(%(length)s)', 3120 'blob': 'IMAGE', 3121 'upload': 'VARCHAR(%(length)s)', 3122 'integer': 'INT', 3123 'bigint': 'BIGINT', 3124 'float': 'FLOAT', 3125 'double': 'FLOAT', 3126 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3127 'date': 'DATETIME', 3128 'time': 'CHAR(8)', 3129 'datetime': 'DATETIME', 3130 'id': 'INT IDENTITY PRIMARY KEY', 3131 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3132 'list:integer': 'TEXT', 3133 'list:string': 'TEXT', 3134 'list:reference': 'TEXT', 3135 'geometry': 'geometry', 3136 'geography': 'geography', 3137 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 3138 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3139 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3140 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3141 } 3142
3143 - def concat_add(self,tablename):
3144 return '; ALTER TABLE %s ADD ' % tablename
3145
3146 - def varquote(self,name):
3147 return varquote_aux(name,'[%s]')
3148
3149 - def EXTRACT(self,field,what):
3150 return "DATEPART(%s,%s)" % (what, self.expand(field))
3151
3152 - def LEFT_JOIN(self):
3153 return 'LEFT OUTER JOIN'
3154
3155 - def RANDOM(self):
3156 return 'NEWID()'
3157
3158 - def ALLOW_NULL(self):
3159 return ' NULL'
3160
3161 - def SUBSTRING(self,field,parameters):
3162 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
3163
3164 - def PRIMARY_KEY(self,key):
3165 return 'PRIMARY KEY CLUSTERED (%s)' % key
3166
3167 - def AGGREGATE(self, first, what):
3168 if what == 'LENGTH': 3169 what = 'LEN' 3170 return "%s(%s)" % (what, self.expand(first))
3171 3172
3173 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3174 if limitby: 3175 (lmin, lmax) = limitby 3176 sql_s += ' TOP %i' % lmax 3177 if 'GROUP BY' in sql_o: 3178 orderfound = sql_o.find('ORDER BY ') 3179 if orderfound >= 0: 3180 sql_o = sql_o[:orderfound] 3181 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3182 3183 TRUE = 1 3184 FALSE = 0 3185 3186 REGEX_DSN = re.compile('^(?P<dsn>.+)$') 3187 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$') 3188 REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') 3189
3190 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3191 credential_decoder=IDENTITY, driver_args={}, 3192 adapter_args={}, do_connect=True, srid=4326, 3193 after_connection=None):
3194 self.db = db 3195 self.dbengine = "mssql" 3196 self.uri = uri 3197 if do_connect: self.find_driver(adapter_args,uri) 3198 self.pool_size = pool_size 3199 self.folder = folder 3200 self.db_codec = db_codec 3201 self._after_connection = after_connection 3202 self.srid = srid 3203 self.find_or_make_work_folder() 3204 # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 3205 ruri = uri.split('://',1)[1] 3206 if '@' not in ruri: 3207 try: 3208 m = self.REGEX_DSN.match(ruri) 3209 if not m: 3210 raise SyntaxError( 3211 'Parsing uri string(%s) has no result' % self.uri) 3212 dsn = m.group('dsn') 3213 if not dsn: 3214 raise SyntaxError('DSN required') 3215 except SyntaxError: 3216 e = sys.exc_info()[1] 3217 LOGGER.error('NdGpatch error') 3218 raise e 3219 # was cnxn = 'DSN=%s' % dsn 3220 cnxn = dsn 3221 else: 3222 m = self.REGEX_URI.match(ruri) 3223 if not m: 3224 raise SyntaxError( 3225 "Invalid URI string in DAL: %s" % self.uri) 3226 user = credential_decoder(m.group('user')) 3227 if not user: 3228 raise SyntaxError('User required') 3229 password = credential_decoder(m.group('password')) 3230 if not password: 3231 password = '' 3232 host = m.group('host') 3233 if not host: 3234 raise SyntaxError('Host name required') 3235 db = m.group('db') 3236 if not db: 3237 raise SyntaxError('Database name required') 3238 port = m.group('port') or '1433' 3239 # Parse the optional url name-value arg pairs after the '?' 3240 # (in the form of arg1=value1&arg2=value2&...) 3241 # Default values (drivers like FreeTDS insist on uppercase parameter keys) 3242 argsdict = { 'DRIVER':'{SQL Server}' } 3243 urlargs = m.group('urlargs') or '' 3244 for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs): 3245 argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue') 3246 urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()]) 3247 cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \ 3248 % (host, port, db, user, password, urlargs) 3249 def connector(cnxn=cnxn,driver_args=driver_args): 3250 return self.driver.connect(cnxn,**driver_args)
3251 self.connector = connector 3252 if do_connect: self.reconnect()
3253
3254 - def lastrowid(self,table):
3255 #self.execute('SELECT @@IDENTITY;') 3256 self.execute('SELECT SCOPE_IDENTITY();') 3257 return long(self.cursor.fetchone()[0])
3258
3259 - def rowslice(self,rows,minimum=0,maximum=None):
3260 if maximum is None: 3261 return rows[minimum:] 3262 return rows[minimum:maximum]
3263
3264 - def EPOCH(self, first):
3265 return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
3266
3267 - def CONCAT(self, *items):
3268 return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)
3269 3270 # GIS Spatial Extensions 3271 3272 # No STAsGeoJSON in MSSQL 3273
3274 - def ST_ASTEXT(self, first):
3275 return '%s.STAsText()' %(self.expand(first))
3276
3277 - def ST_CONTAINS(self, first, second):
3278 return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))
3279
3280 - def ST_DISTANCE(self, first, second):
3281 return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))
3282
3283 - def ST_EQUALS(self, first, second):
3284 return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))
3285
3286 - def ST_INTERSECTS(self, first, second):
3287 return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))
3288
3289 - def ST_OVERLAPS(self, first, second):
3290 return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))
3291 3292 # no STSimplify in MSSQL 3293
3294 - def ST_TOUCHES(self, first, second):
3295 return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))
3296
3297 - def ST_WITHIN(self, first, second):
3298 return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
3299
3300 - def represent(self, obj, fieldtype):
3301 field_is_type = fieldtype.startswith 3302 if field_is_type('geometry'): 3303 srid = 0 # MS SQL default srid for geometry 3304 geotype, parms = fieldtype[:-1].split('(') 3305 if parms: 3306 srid = parms 3307 return "geometry::STGeomFromText('%s',%s)" %(obj, srid) 3308 elif fieldtype == 'geography': 3309 srid = 4326 # MS SQL default srid for geography 3310 geotype, parms = fieldtype[:-1].split('(') 3311 if parms: 3312 srid = parms 3313 return "geography::STGeomFromText('%s',%s)" %(obj, srid) 3314 # else: 3315 # raise SyntaxError('Invalid field type %s' %fieldtype) 3316 return "geometry::STGeomFromText('%s',%s)" %(obj, srid) 3317 return BaseAdapter.represent(self, obj, fieldtype)
3318
3319 3320 -class MSSQL3Adapter(MSSQLAdapter):
3321 """ experimental support for pagination in MSSQL"""
3322 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3323 if limitby: 3324 (lmin, lmax) = limitby 3325 if lmin == 0: 3326 sql_s += ' TOP %i' % lmax 3327 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) 3328 lmin += 1 3329 sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:] 3330 sql_g_inner = sql_o[:sql_o.find('ORDER BY ')] 3331 sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))] 3332 sql_f_inner = [f for f in sql_f.split(',')] 3333 sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)] 3334 sql_f_iproxy = ', '.join(sql_f_iproxy) 3335 sql_f_oproxy = ', '.join(sql_f_outer) 3336 return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax) 3337 return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
3338 - def rowslice(self,rows,minimum=0,maximum=None):
3339 return rows
3340
3341 3342 -class MSSQL2Adapter(MSSQLAdapter):
3343 drivers = ('pyodbc',) 3344 3345 types = { 3346 'boolean': 'CHAR(1)', 3347 'string': 'NVARCHAR(%(length)s)', 3348 'text': 'NTEXT', 3349 'json': 'NTEXT', 3350 'password': 'NVARCHAR(%(length)s)', 3351 'blob': 'IMAGE', 3352 'upload': 'NVARCHAR(%(length)s)', 3353 'integer': 'INT', 3354 'bigint': 'BIGINT', 3355 'float': 'FLOAT', 3356 'double': 'FLOAT', 3357 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3358 'date': 'DATETIME', 3359 'time': 'CHAR(8)', 3360 'datetime': 'DATETIME', 3361 'id': 'INT IDENTITY PRIMARY KEY', 3362 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3363 'list:integer': 'NTEXT', 3364 'list:string': 'NTEXT', 3365 'list:reference': 'NTEXT', 3366 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 3367 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3368 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3369 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3370 } 3371
3372 - def represent(self, obj, fieldtype):
3373 value = BaseAdapter.represent(self, obj, fieldtype) 3374 if fieldtype in ('string','text', 'json') and value[:1]=="'": 3375 value = 'N'+value 3376 return value
3377
3378 - def execute(self,a):
3379 return self.log_execute(a.decode('utf8'))
3380
3381 -class VerticaAdapter(MSSQLAdapter):
3382 drivers = ('pyodbc',) 3383 T_SEP = ' ' 3384 3385 types = { 3386 'boolean': 'BOOLEAN', 3387 'string': 'VARCHAR(%(length)s)', 3388 'text': 'BYTEA', 3389 'json': 'VARCHAR(%(length)s)', 3390 'password': 'VARCHAR(%(length)s)', 3391 'blob': 'BYTEA', 3392 'upload': 'VARCHAR(%(length)s)', 3393 'integer': 'INT', 3394 'bigint': 'BIGINT', 3395 'float': 'FLOAT', 3396 'double': 'DOUBLE PRECISION', 3397 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 3398 'date': 'DATE', 3399 'time': 'TIME', 3400 'datetime': 'DATETIME', 3401 'id': 'IDENTITY', 3402 'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3403 'list:integer': 'BYTEA', 3404 'list:string': 'BYTEA', 3405 'list:reference': 'BYTEA', 3406 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3407 } 3408 3409
3410 - def EXTRACT(self, first, what):
3411 return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))
3412
3413 - def _truncate(self, table, mode=''):
3414 tablename = table._tablename 3415 return ['TRUNCATE %s %s;' % (tablename, mode or '')]
3416
3417 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3418 if limitby: 3419 (lmin, lmax) = limitby 3420 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 3421 return 'SELECT %s %s FROM %s%s%s;' % \ 3422 (sql_s, sql_f, sql_t, sql_w, sql_o)
3423
3424 - def lastrowid(self,table):
3425 self.execute('SELECT LAST_INSERT_ID();') 3426 return long(self.cursor.fetchone()[0])
3427
3428 - def execute(self, a):
3429 return self.log_execute(a)
3430
3431 -class SybaseAdapter(MSSQLAdapter):
3432 drivers = ('Sybase',) 3433 3434 types = { 3435 'boolean': 'BIT', 3436 'string': 'CHAR VARYING(%(length)s)', 3437 'text': 'TEXT', 3438 'json': 'TEXT', 3439 'password': 'CHAR VARYING(%(length)s)', 3440 'blob': 'IMAGE', 3441 'upload': 'CHAR VARYING(%(length)s)', 3442 'integer': 'INT', 3443 'bigint': 'BIGINT', 3444 'float': 'FLOAT', 3445 'double': 'FLOAT', 3446 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3447 'date': 'DATETIME', 3448 'time': 'CHAR(8)', 3449 'datetime': 'DATETIME', 3450 'id': 'INT IDENTITY PRIMARY KEY', 3451 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3452 'list:integer': 'TEXT', 3453 'list:string': 'TEXT', 3454 'list:reference': 'TEXT', 3455 'geometry': 'geometry', 3456 'geography': 'geography', 3457 'big-id': 'BIGINT IDENTITY PRIMARY KEY', 3458 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3459 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3460 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3461 } 3462 3463
3464 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3465 credential_decoder=IDENTITY, driver_args={}, 3466 adapter_args={}, do_connect=True, srid=4326, 3467 after_connection=None):
3468 self.db = db 3469 self.dbengine = "sybase" 3470 self.uri = uri 3471 if do_connect: self.find_driver(adapter_args,uri) 3472 self.pool_size = pool_size 3473 self.folder = folder 3474 self.db_codec = db_codec 3475 self._after_connection = after_connection 3476 self.srid = srid 3477 self.find_or_make_work_folder() 3478 # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 3479 ruri = uri.split('://',1)[1] 3480 if '@' not in ruri: 3481 try: 3482 m = self.REGEX_DSN.match(ruri) 3483 if not m: 3484 raise SyntaxError( 3485 'Parsing uri string(%s) has no result' % self.uri) 3486 dsn = m.group('dsn') 3487 if not dsn: 3488 raise SyntaxError('DSN required') 3489 except SyntaxError: 3490 e = sys.exc_info()[1] 3491 LOGGER.error('NdGpatch error') 3492 raise e 3493 else: 3494 m = self.REGEX_URI.match(uri) 3495 if not m: 3496 raise SyntaxError( 3497 "Invalid URI string in DAL: %s" % self.uri) 3498 user = credential_decoder(m.group('user')) 3499 if not user: 3500 raise SyntaxError('User required') 3501 password = credential_decoder(m.group('password')) 3502 if not password: 3503 password = '' 3504 host = m.group('host') 3505 if not host: 3506 raise SyntaxError('Host name required') 3507 db = m.group('db') 3508 if not db: 3509 raise SyntaxError('Database name required') 3510 port = m.group('port') or '1433' 3511 3512 dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db) 3513 3514 driver_args.update(user = credential_decoder(user), 3515 password = credential_decoder(password)) 3516 3517 def connector(dsn=dsn,driver_args=driver_args): 3518 return self.driver.connect(dsn,**driver_args)
3519 self.connector = connector 3520 if do_connect: self.reconnect()
3521
3522 3523 -class FireBirdAdapter(BaseAdapter):
3524 drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') 3525 3526 commit_on_alter_table = False 3527 support_distributed_transaction = True 3528 types = { 3529 'boolean': 'CHAR(1)', 3530 'string': 'VARCHAR(%(length)s)', 3531 'text': 'BLOB SUB_TYPE 1', 3532 'json': 'BLOB SUB_TYPE 1', 3533 'password': 'VARCHAR(%(length)s)', 3534 'blob': 'BLOB SUB_TYPE 0', 3535 'upload': 'VARCHAR(%(length)s)', 3536 'integer': 'INTEGER', 3537 'bigint': 'BIGINT', 3538 'float': 'FLOAT', 3539 'double': 'DOUBLE PRECISION', 3540 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', 3541 'date': 'DATE', 3542 'time': 'TIME', 3543 'datetime': 'TIMESTAMP', 3544 'id': 'INTEGER PRIMARY KEY', 3545 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3546 'list:integer': 'BLOB SUB_TYPE 1', 3547 'list:string': 'BLOB SUB_TYPE 1', 3548 'list:reference': 'BLOB SUB_TYPE 1', 3549 'big-id': 'BIGINT PRIMARY KEY', 3550 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3551 } 3552
3553 - def sequence_name(self,tablename):
3554 return 'genid_%s' % tablename
3555
3556 - def trigger_name(self,tablename):
3557 return 'trg_id_%s' % tablename
3558
3559 - def RANDOM(self):
3560 return 'RAND()'
3561
3562 - def EPOCH(self, first):
3563 return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
3564
3565 - def NOT_NULL(self,default,field_type):
3566 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
3567
3568 - def SUBSTRING(self,field,parameters):
3569 return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
3570
3571 - def LENGTH(self, first):
3572 return "CHAR_LENGTH(%s)" % self.expand(first)
3573
3574 - def CONTAINS(self,first,second,case_sensitive=False):
3575 if first.type.startswith('list:'): 3576 second = Expression(None,self.CONCAT('|',Expression( 3577 None,self.REPLACE(second,('|','||'))),'|')) 3578 return '(%s CONTAINING %s)' % (self.expand(first), 3579 self.expand(second, 'string'))
3580
3581 - def _drop(self,table,mode):
3582 sequence_name = table._sequence_name 3583 return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
3584
3585 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3586 if limitby: 3587 (lmin, lmax) = limitby 3588 sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s) 3589 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3590
3591 - def _truncate(self,table,mode = ''):
3592 return ['DELETE FROM %s;' % table._tablename, 3593 'SET GENERATOR %s TO 0;' % table._sequence_name]
3594 3595 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$') 3596
3597 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3598 credential_decoder=IDENTITY, driver_args={}, 3599 adapter_args={}, do_connect=True, after_connection=None):
3600 self.db = db 3601 self.dbengine = "firebird" 3602 self.uri = uri 3603 if do_connect: self.find_driver(adapter_args,uri) 3604 self.pool_size = pool_size 3605 self.folder = folder 3606 self.db_codec = db_codec 3607 self._after_connection = after_connection 3608 self.find_or_make_work_folder() 3609 ruri = uri.split('://',1)[1] 3610 m = self.REGEX_URI.match(ruri) 3611 if not m: 3612 raise SyntaxError("Invalid URI string in DAL: %s" % self.uri) 3613 user = credential_decoder(m.group('user')) 3614 if not user: 3615 raise SyntaxError('User required') 3616 password = credential_decoder(m.group('password')) 3617 if not password: 3618 password = '' 3619 host = m.group('host') 3620 if not host: 3621 raise SyntaxError('Host name required') 3622 port = int(m.group('port') or 3050) 3623 db = m.group('db') 3624 if not db: 3625 raise SyntaxError('Database name required') 3626 charset = m.group('charset') or 'UTF8' 3627 driver_args.update(dsn='%s/%s:%s' % (host,port,db), 3628 user = credential_decoder(user), 3629 password = credential_decoder(password), 3630 charset = charset) 3631 3632 def connector(driver_args=driver_args): 3633 return self.driver.connect(**driver_args)
3634 self.connector = connector 3635 if do_connect: self.reconnect()
3636
3637 - def create_sequence_and_triggers(self, query, table, **args):
3638 tablename = table._tablename 3639 sequence_name = table._sequence_name 3640 trigger_name = table._trigger_name 3641 self.execute(query) 3642 self.execute('create generator %s;' % sequence_name) 3643 self.execute('set generator %s to 0;' % sequence_name) 3644 self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
3645
3646 - def lastrowid(self,table):
3647 sequence_name = table._sequence_name 3648 self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name) 3649 return long(self.cursor.fetchone()[0])
3650
3651 3652 -class FireBirdEmbeddedAdapter(FireBirdAdapter):
3653 drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') 3654 3655 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$') 3656
3657 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3658 credential_decoder=IDENTITY, driver_args={}, 3659 adapter_args={}, do_connect=True, after_connection=None):
3660 self.db = db 3661 self.dbengine = "firebird" 3662 self.uri = uri 3663 if do_connect: self.find_driver(adapter_args,uri) 3664 self.pool_size = pool_size 3665 self.folder = folder 3666 self.db_codec = db_codec 3667 self._after_connection = after_connection 3668 self.find_or_make_work_folder() 3669 ruri = uri.split('://',1)[1] 3670 m = self.REGEX_URI.match(ruri) 3671 if not m: 3672 raise SyntaxError( 3673 "Invalid URI string in DAL: %s" % self.uri) 3674 user = credential_decoder(m.group('user')) 3675 if not user: 3676 raise SyntaxError('User required') 3677 password = credential_decoder(m.group('password')) 3678 if not password: 3679 password = '' 3680 pathdb = m.group('path') 3681 if not pathdb: 3682 raise SyntaxError('Path required') 3683 charset = m.group('charset') 3684 if not charset: 3685 charset = 'UTF8' 3686 host = '' 3687 driver_args.update(host=host, 3688 database=pathdb, 3689 user=credential_decoder(user), 3690 password=credential_decoder(password), 3691 charset=charset) 3692 3693 def connector(driver_args=driver_args): 3694 return self.driver.connect(**driver_args)
3695 self.connector = connector 3696 if do_connect: self.reconnect()
3697
3698 -class InformixAdapter(BaseAdapter):
3699 drivers = ('informixdb',) 3700 3701 types = { 3702 'boolean': 'CHAR(1)', 3703 'string': 'VARCHAR(%(length)s)', 3704 'text': 'BLOB SUB_TYPE 1', 3705 'json': 'BLOB SUB_TYPE 1', 3706 'password': 'VARCHAR(%(length)s)', 3707 'blob': 'BLOB SUB_TYPE 0', 3708 'upload': 'VARCHAR(%(length)s)', 3709 'integer': 'INTEGER', 3710 'bigint': 'BIGINT', 3711 'float': 'FLOAT', 3712 'double': 'DOUBLE PRECISION', 3713 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3714 'date': 'DATE', 3715 'time': 'CHAR(8)', 3716 'datetime': 'DATETIME', 3717 'id': 'SERIAL', 3718 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3719 'list:integer': 'BLOB SUB_TYPE 1', 3720 'list:string': 'BLOB SUB_TYPE 1', 3721 'list:reference': 'BLOB SUB_TYPE 1', 3722 'big-id': 'BIGSERIAL', 3723 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3724 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', 3725 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', 3726 } 3727
3728 - def RANDOM(self):
3729 return 'Random()'
3730
3731 - def NOT_NULL(self,default,field_type):
3732 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
3733
3734 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3735 if limitby: 3736 (lmin, lmax) = limitby 3737 fetch_amt = lmax - lmin 3738 dbms_version = int(self.connection.dbms_version.split('.')[0]) 3739 if lmin and (dbms_version >= 10): 3740 # Requires Informix 10.0+ 3741 sql_s += ' SKIP %d' % (lmin, ) 3742 if fetch_amt and (dbms_version >= 9): 3743 # Requires Informix 9.0+ 3744 sql_s += ' FIRST %d' % (fetch_amt, ) 3745 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3746
3747 - def represent_exceptions(self, obj, fieldtype):
3748 if fieldtype == 'date': 3749 if isinstance(obj, (datetime.date, datetime.datetime)): 3750 obj = obj.isoformat()[:10] 3751 else: 3752 obj = str(obj) 3753 return "to_date('%s','%%Y-%%m-%%d')" % obj 3754 elif fieldtype == 'datetime': 3755 if isinstance(obj, datetime.datetime): 3756 obj = obj.isoformat()[:19].replace('T',' ') 3757 elif isinstance(obj, datetime.date): 3758 obj = obj.isoformat()[:10]+' 00:00:00' 3759 else: 3760 obj = str(obj) 3761 return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj 3762 return None
3763 3764 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$') 3765
3766 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3767 credential_decoder=IDENTITY, driver_args={}, 3768 adapter_args={}, do_connect=True, after_connection=None):
3769 self.db = db 3770 self.dbengine = "informix" 3771 self.uri = uri 3772 if do_connect: self.find_driver(adapter_args,uri) 3773 self.pool_size = pool_size 3774 self.folder = folder 3775 self.db_codec = db_codec 3776 self._after_connection = after_connection 3777 self.find_or_make_work_folder() 3778 ruri = uri.split('://',1)[1] 3779 m = self.REGEX_URI.match(ruri) 3780 if not m: 3781 raise SyntaxError( 3782 "Invalid URI string in DAL: %s" % self.uri) 3783 user = credential_decoder(m.group('user')) 3784 if not user: 3785 raise SyntaxError('User required') 3786 password = credential_decoder(m.group('password')) 3787 if not password: 3788 password = '' 3789 host = m.group('host') 3790 if not host: 3791 raise SyntaxError('Host name required') 3792 db = m.group('db') 3793 if not db: 3794 raise SyntaxError('Database name required') 3795 user = credential_decoder(user) 3796 password = credential_decoder(password) 3797 dsn = '%s@%s' % (db,host) 3798 driver_args.update(user=user,password=password,autocommit=True) 3799 def connector(dsn=dsn,driver_args=driver_args): 3800 return self.driver.connect(dsn,**driver_args)
3801 self.connector = connector 3802 if do_connect: self.reconnect()
3803
3804 - def execute(self,command):
3805 if command[-1:]==';': 3806 command = command[:-1] 3807 return self.log_execute(command)
3808
3809 - def lastrowid(self,table):
3810 return self.cursor.sqlerrd[1]
3811
3812 -class InformixSEAdapter(InformixAdapter):
3813 """ work in progress """ 3814
3815 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3816 return 'SELECT %s %s FROM %s%s%s;' % \ 3817 (sql_s, sql_f, sql_t, sql_w, sql_o)
3818
3819 - def rowslice(self,rows,minimum=0,maximum=None):
3820 if maximum is None: 3821 return rows[minimum:] 3822 return rows[minimum:maximum]
3823
3824 -class DB2Adapter(BaseAdapter):
3825 drivers = ('pyodbc',) 3826 3827 types = { 3828 'boolean': 'CHAR(1)', 3829 'string': 'VARCHAR(%(length)s)', 3830 'text': 'CLOB', 3831 'json': 'CLOB', 3832 'password': 'VARCHAR(%(length)s)', 3833 'blob': 'BLOB', 3834 'upload': 'VARCHAR(%(length)s)', 3835 'integer': 'INT', 3836 'bigint': 'BIGINT', 3837 'float': 'REAL', 3838 'double': 'DOUBLE', 3839 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3840 'date': 'DATE', 3841 'time': 'TIME', 3842 'datetime': 'TIMESTAMP', 3843 'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 3844 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3845 'list:integer': 'CLOB', 3846 'list:string': 'CLOB', 3847 'list:reference': 'CLOB', 3848 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', 3849 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3850 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 3851 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 3852 } 3853
3854 - def LEFT_JOIN(self):
3855 return 'LEFT OUTER JOIN'
3856
3857 - def RANDOM(self):
3858 return 'RAND()'
3859
3860 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3861 if limitby: 3862 (lmin, lmax) = limitby 3863 sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax 3864 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3865
3866 - def represent_exceptions(self, obj, fieldtype):
3867 if fieldtype == 'blob': 3868 obj = base64.b64encode(str(obj)) 3869 return "BLOB('%s')" % obj 3870 elif fieldtype == 'datetime': 3871 if isinstance(obj, datetime.datetime): 3872 obj = obj.isoformat()[:19].replace('T','-').replace(':','.') 3873 elif isinstance(obj, datetime.date): 3874 obj = obj.isoformat()[:10]+'-00.00.00' 3875 return "'%s'" % obj 3876 return None
3877
3878 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3879 credential_decoder=IDENTITY, driver_args={}, 3880 adapter_args={}, do_connect=True, after_connection=None):
3881 self.db = db 3882 self.dbengine = "db2" 3883 self.uri = uri 3884 if do_connect: self.find_driver(adapter_args,uri) 3885 self.pool_size = pool_size 3886 self.folder = folder 3887 self.db_codec = db_codec 3888 self._after_connection = after_connection 3889 self.find_or_make_work_folder() 3890 ruri = uri.split('://', 1)[1] 3891 def connector(cnxn=ruri,driver_args=driver_args): 3892 return self.driver.connect(cnxn,**driver_args)
3893 self.connector = connector 3894 if do_connect: self.reconnect()
3895
3896 - def execute(self,command):
3897 if command[-1:]==';': 3898 command = command[:-1] 3899 return self.log_execute(command)
3900
3901 - def lastrowid(self,table):
3902 self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table) 3903 return long(self.cursor.fetchone()[0])
3904
3905 - def rowslice(self,rows,minimum=0,maximum=None):
3906 if maximum is None: 3907 return rows[minimum:] 3908 return rows[minimum:maximum]
3909
3910 3911 -class TeradataAdapter(BaseAdapter):
3912 drivers = ('pyodbc',) 3913 3914 types = { 3915 'boolean': 'CHAR(1)', 3916 'string': 'VARCHAR(%(length)s)', 3917 'text': 'CLOB', 3918 'json': 'CLOB', 3919 'password': 'VARCHAR(%(length)s)', 3920 'blob': 'BLOB', 3921 'upload': 'VARCHAR(%(length)s)', 3922 'integer': 'INT', 3923 'bigint': 'BIGINT', 3924 'float': 'REAL', 3925 'double': 'DOUBLE', 3926 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3927 'date': 'DATE', 3928 'time': 'TIME', 3929 'datetime': 'TIMESTAMP', 3930 # Modified Constraint syntax for Teradata. 3931 # Teradata does not support ON DELETE. 3932 'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific 3933 'reference': 'INT', 3934 'list:integer': 'CLOB', 3935 'list:string': 'CLOB', 3936 'list:reference': 'CLOB', 3937 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific 3938 'big-reference': 'BIGINT', 3939 'reference FK': ' REFERENCES %(foreign_key)s', 3940 'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)', 3941 } 3942
3943 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 3944 credential_decoder=IDENTITY, driver_args={}, 3945 adapter_args={}, do_connect=True, after_connection=None):
3946 self.db = db 3947 self.dbengine = "teradata" 3948 self.uri = uri 3949 if do_connect: self.find_driver(adapter_args,uri) 3950 self.pool_size = pool_size 3951 self.folder = folder 3952 self.db_codec = db_codec 3953 self._after_connection = after_connection 3954 self.find_or_make_work_folder() 3955 ruri = uri.split('://', 1)[1] 3956 def connector(cnxn=ruri,driver_args=driver_args): 3957 return self.driver.connect(cnxn,**driver_args)
3958 self.connector = connector 3959 if do_connect: self.reconnect()
3960
3961 - def LEFT_JOIN(self):
3962 return 'LEFT OUTER JOIN'
3963 3964 # Similar to MSSQL, Teradata can't specify a range (for Pageby)
3965 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3966 if limitby: 3967 (lmin, lmax) = limitby 3968 sql_s += ' TOP %i' % lmax 3969 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3970
3971 - def _truncate(self, table, mode=''):
3972 tablename = table._tablename 3973 return ['DELETE FROM %s ALL;' % (tablename)]
3974 3975 INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
3976 # (ANSI-SQL wants this form of name 3977 # to be a delimited identifier) 3978 3979 -class IngresAdapter(BaseAdapter):
3980 drivers = ('pyodbc',) 3981 3982 types = { 3983 'boolean': 'CHAR(1)', 3984 'string': 'VARCHAR(%(length)s)', 3985 'text': 'CLOB', 3986 'json': 'CLOB', 3987 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 3988 'blob': 'BLOB', 3989 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 3990 'integer': 'INTEGER4', # or int8... 3991 'bigint': 'BIGINT', 3992 'float': 'FLOAT', 3993 'double': 'FLOAT8', 3994 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 3995 'date': 'ANSIDATE', 3996 'time': 'TIME WITHOUT TIME ZONE', 3997 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 3998 'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME, 3999 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4000 'list:integer': 'CLOB', 4001 'list:string': 'CLOB', 4002 'list:reference': 'CLOB', 4003 'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME, 4004 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4005 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4006 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO 4007 } 4008
4009 - def LEFT_JOIN(self):
4010 return 'LEFT OUTER JOIN'
4011
4012 - def RANDOM(self):
4013 return 'RANDOM()'
4014
4015 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
4016 if limitby: 4017 (lmin, lmax) = limitby 4018 fetch_amt = lmax - lmin 4019 if fetch_amt: 4020 sql_s += ' FIRST %d ' % (fetch_amt, ) 4021 if lmin: 4022 # Requires Ingres 9.2+ 4023 sql_o += ' OFFSET %d' % (lmin, ) 4024 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
4025
4026 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 4027 credential_decoder=IDENTITY, driver_args={}, 4028 adapter_args={}, do_connect=True, after_connection=None):
4029 self.db = db 4030 self.dbengine = "ingres" 4031 self._driver = pyodbc 4032 self.uri = uri 4033 if do_connect: self.find_driver(adapter_args,uri) 4034 self.pool_size = pool_size 4035 self.folder = folder 4036 self.db_codec = db_codec 4037 self._after_connection = after_connection 4038 self.find_or_make_work_folder() 4039 connstr = uri.split(':', 1)[1] 4040 # Simple URI processing 4041 connstr = connstr.lstrip() 4042 while connstr.startswith('/'): 4043 connstr = connstr[1:] 4044 if '=' in connstr: 4045 # Assume we have a regular ODBC connection string and just use it 4046 ruri = connstr 4047 else: 4048 # Assume only (local) dbname is passed in with OS auth 4049 database_name = connstr 4050 default_driver_name = 'Ingres' 4051 vnode = '(local)' 4052 servertype = 'ingres' 4053 ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name) 4054 def connector(cnxn=ruri,driver_args=driver_args): 4055 return self.driver.connect(cnxn,**driver_args)
4056 4057 self.connector = connector 4058 4059 # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns 4060 if do_connect: self.reconnect()
4061
4062 - def create_sequence_and_triggers(self, query, table, **args):
4063 # post create table auto inc code (if needed) 4064 # modify table to btree for performance.... 4065 # Older Ingres releases could use rule/trigger like Oracle above. 4066 if hasattr(table,'_primarykey'): 4067 modify_tbl_sql = 'modify %s to btree unique on %s' % \ 4068 (table._tablename, 4069 ', '.join(["'%s'" % x for x in table.primarykey])) 4070 self.execute(modify_tbl_sql) 4071 else: 4072 tmp_seqname='%s_iisq' % table._tablename 4073 query=query.replace(INGRES_SEQNAME, tmp_seqname) 4074 self.execute('create sequence %s' % tmp_seqname) 4075 self.execute(query) 4076 self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))
4077 4078
4079 - def lastrowid(self,table):
4080 tmp_seqname='%s_iisq' % table 4081 self.execute('select current value for %s' % tmp_seqname) 4082 return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
4083
4084 4085 -class IngresUnicodeAdapter(IngresAdapter):
4086 4087 drivers = ('pyodbc',) 4088 4089 types = { 4090 'boolean': 'CHAR(1)', 4091 'string': 'NVARCHAR(%(length)s)', 4092 'text': 'NCLOB', 4093 'json': 'NCLOB', 4094 'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? 4095 'blob': 'BLOB', 4096 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? 4097 'integer': 'INTEGER4', # or int8... 4098 'bigint': 'BIGINT', 4099 'float': 'FLOAT', 4100 'double': 'FLOAT8', 4101 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 4102 'date': 'ANSIDATE', 4103 'time': 'TIME WITHOUT TIME ZONE', 4104 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', 4105 'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME, 4106 'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4107 'list:integer': 'NCLOB', 4108 'list:string': 'NCLOB', 4109 'list:reference': 'NCLOB', 4110 'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME, 4111 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4112 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4113 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO 4114 }
4115
4116 -class SAPDBAdapter(BaseAdapter):
4117 drivers = ('sapdb',) 4118 4119 support_distributed_transaction = False 4120 types = { 4121 'boolean': 'CHAR(1)', 4122 'string': 'VARCHAR(%(length)s)', 4123 'text': 'LONG', 4124 'json': 'LONG', 4125 'password': 'VARCHAR(%(length)s)', 4126 'blob': 'LONG', 4127 'upload': 'VARCHAR(%(length)s)', 4128 'integer': 'INT', 4129 'bigint': 'BIGINT', 4130 'float': 'FLOAT', 4131 'double': 'DOUBLE PRECISION', 4132 'decimal': 'FIXED(%(precision)s,%(scale)s)', 4133 'date': 'DATE', 4134 'time': 'TIME', 4135 'datetime': 'TIMESTAMP', 4136 'id': 'INT PRIMARY KEY', 4137 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4138 'list:integer': 'LONG', 4139 'list:string': 'LONG', 4140 'list:reference': 'LONG', 4141 'big-id': 'BIGINT PRIMARY KEY', 4142 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 4143 } 4144
4145 - def sequence_name(self,table):
4146 return '%s_id_Seq' % table
4147
4148 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
4149 if limitby: 4150 (lmin, lmax) = limitby 4151 if len(sql_w) > 1: 4152 sql_w_row = sql_w + ' AND w_row > %i' % lmin 4153 else: 4154 sql_w_row = 'WHERE w_row > %i' % lmin 4155 return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) 4156 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
4157
4158 - def create_sequence_and_triggers(self, query, table, **args):
4159 # following lines should only be executed if table._sequence_name does not exist 4160 self.execute('CREATE SEQUENCE %s;' % table._sequence_name) 4161 self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ 4162 % (table._tablename, table._id.name, table._sequence_name)) 4163 self.execute(query)
4164 4165 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$') 4166 4167
4168 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 4169 credential_decoder=IDENTITY, driver_args={}, 4170 adapter_args={}, do_connect=True, after_connection=None):
4171 self.db = db 4172 self.dbengine = "sapdb" 4173 self.uri = uri 4174 if do_connect: self.find_driver(adapter_args,uri) 4175 self.pool_size = pool_size 4176 self.folder = folder 4177 self.db_codec = db_codec 4178 self._after_connection = after_connection 4179 self.find_or_make_work_folder() 4180 ruri = uri.split('://',1)[1] 4181 m = self.REGEX_URI.match(ruri) 4182 if not m: 4183 raise SyntaxError("Invalid URI string in DAL") 4184 user = credential_decoder(m.group('user')) 4185 if not user: 4186 raise SyntaxError('User required') 4187 password = credential_decoder(m.group('password')) 4188 if not password: 4189 password = '' 4190 host = m.group('host') 4191 if not host: 4192 raise SyntaxError('Host name required') 4193 db = m.group('db') 4194 if not db: 4195 raise SyntaxError('Database name required') 4196 def connector(user=user, password=password, database=db, 4197 host=host, driver_args=driver_args): 4198 return self.driver.Connection(user, password, database, 4199 host, **driver_args)
4200 self.connector = connector 4201 if do_connect: self.reconnect()
4202
4203 - def lastrowid(self,table):
4204 self.execute("select %s.NEXTVAL from dual" % table._sequence_name) 4205 return long(self.cursor.fetchone()[0])
4206
4207 -class CubridAdapter(MySQLAdapter):
4208 drivers = ('cubriddb',) 4209 4210 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$') 4211
4212 - def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', 4213 credential_decoder=IDENTITY, driver_args={}, 4214 adapter_args={}, do_connect=True, after_connection=None):
4215 self.db = db 4216 self.dbengine = "cubrid" 4217 self.uri = uri 4218 if do_connect: self.find_driver(adapter_args,uri) 4219 self.pool_size = pool_size 4220 self.folder = folder 4221 self.db_codec = db_codec 4222 self._after_connection = after_connection 4223 self.find_or_make_work_folder() 4224 ruri = uri.split('://',1)[1] 4225 m = self.REGEX_URI.match(ruri) 4226 if not m: 4227 raise SyntaxError( 4228 "Invalid URI string in DAL: %s" % self.uri) 4229 user = credential_decoder(m.group('user')) 4230 if not user: 4231 raise SyntaxError('User required') 4232 password = credential_decoder(m.group('password')) 4233 if not password: 4234 password = '' 4235 host = m.group('host') 4236 if not host: 4237 raise SyntaxError('Host name required') 4238 db = m.group('db') 4239 if not db: 4240 raise SyntaxError('Database name required') 4241 port = int(m.group('port') or '30000') 4242 charset = m.group('charset') or 'utf8' 4243 user = credential_decoder(user) 4244 passwd = credential_decoder(password) 4245 def connector(host=host,port=port,db=db, 4246 user=user,passwd=password,driver_args=driver_args): 4247 return self.driver.connect(host,port,db,user,passwd,**driver_args)
4248 self.connector = connector 4249 if do_connect: self.reconnect()
4250
4251 - def after_connection(self):
4252 self.execute('SET FOREIGN_KEY_CHECKS=1;') 4253 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4254
4255 4256 ######## GAE MySQL ########## 4257 4258 -class DatabaseStoredFile:
4259 4260 web2py_filesystem = False 4261
4262 - def escape(self,obj):
4263 return self.db._adapter.escape(obj)
4264
4265 - def __init__(self,db,filename,mode):
4266 if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'): 4267 raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now") 4268 self.db = db 4269 self.filename = filename 4270 self.mode = mode 4271 if not self.web2py_filesystem: 4272 if db._adapter.dbengine == 'mysql': 4273 sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;" 4274 elif db._adapter.dbengine in ('postgres', 'sqlite'): 4275 sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));" 4276 self.db.executesql(sql) 4277 DatabaseStoredFile.web2py_filesystem = True 4278 self.p=0 4279 self.data = '' 4280 if mode in ('r','rw','a'): 4281 query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \ 4282 % filename 4283 rows = self.db.executesql(query) 4284 if rows: 4285 self.data = rows[0][0] 4286 elif exists(filename): 4287 datafile = open(filename, 'r') 4288 try: 4289 self.data = datafile.read() 4290 finally: 4291 datafile.close() 4292 elif mode in ('r','rw'): 4293 raise RuntimeError("File %s does not exist" % filename)
4294
4295 - def read(self, bytes):
4296 data = self.data[self.p:self.p+bytes] 4297 self.p += len(data) 4298 return data
4299
4300 - def readline(self):
4301 i = self.data.find('\n',self.p)+1 4302 if i>0: 4303 data, self.p = self.data[self.p:i], i 4304 else: 4305 data, self.p = self.data[self.p:], len(self.data) 4306 return data
4307
4308 - def write(self,data):
4309 self.data += data
4310
4311 - def close_connection(self):
4312 if self.db is not None: 4313 self.db.executesql( 4314 "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename) 4315 query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\ 4316 % (self.filename, self.data.replace("'","''")) 4317 self.db.executesql(query) 4318 self.db.commit() 4319 self.db = None
4320
4321 - def close(self):
4322 self.close_connection()
4323 4324 @staticmethod
4325 - def exists(db, filename):
4326 if exists(filename): 4327 return True 4328 query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename 4329 try: 4330 if db.executesql(query): 4331 return True 4332 except Exception, e: 4333 if not db._adapter.isOperationalError(e): 4334 raise 4335 # no web2py_filesystem found? 4336 tb = traceback.format_exc() 4337 LOGGER.error("Could not retrieve %s\n%s" % (filename, tb)) 4338 return False
4339
4340 4341 -class UseDatabaseStoredFile:
4342
4343 - def file_exists(self, filename):
4344 return DatabaseStoredFile.exists(self.db,filename)
4345
4346 - def file_open(self, filename, mode='rb', lock=True):
4347 return DatabaseStoredFile(self.db,filename,mode)
4348
4349 - def file_close(self, fileobj):
4350 fileobj.close_connection()
4351
4352 - def file_delete(self,filename):
4353 query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename 4354 self.db.executesql(query) 4355 self.db.commit()
4356
4357 -class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
4358 uploads_in_blob = True 4359 4360 REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$') 4361
4362 - def __init__(self, db, uri='google:sql://realm:domain/database', 4363 pool_size=0, folder=None, db_codec='UTF-8', 4364 credential_decoder=IDENTITY, driver_args={}, 4365 adapter_args={}, do_connect=True, after_connection=None):
4366 4367 self.db = db 4368 self.dbengine = "mysql" 4369 self.uri = uri 4370 self.pool_size = pool_size 4371 self.db_codec = db_codec 4372 self._after_connection = after_connection 4373 self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split( 4374 os.sep+'applications'+os.sep,1)[1]) 4375 ruri = uri.split("://")[1] 4376 m = self.REGEX_URI.match(ruri) 4377 if not m: 4378 raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri) 4379 instance = credential_decoder(m.group('instance')) 4380 self.dbstring = db = credential_decoder(m.group('db')) 4381 driver_args['instance'] = instance 4382 if not 'charset' in driver_args: 4383 driver_args['charset'] = 'utf8' 4384 self.createdb = createdb = adapter_args.get('createdb',True) 4385 if not createdb: 4386 driver_args['database'] = db 4387 def connector(driver_args=driver_args): 4388 return rdbms.connect(**driver_args)
4389 self.connector = connector 4390 if do_connect: self.reconnect()
4391
4392 - def after_connection(self):
4393 if self.createdb: 4394 # self.execute('DROP DATABASE %s' % self.dbstring) 4395 self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring) 4396 self.execute('USE %s' % self.dbstring) 4397 self.execute("SET FOREIGN_KEY_CHECKS=1;") 4398 self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4399
4400 - def execute(self, command, *a, **b):
4401 return self.log_execute(command.decode('utf8'), *a, **b)
4402
4403 -class NoSQLAdapter(BaseAdapter):
4404 can_select_for_update = False 4405 4406 @staticmethod
4407 - def to_unicode(obj):
4408 if isinstance(obj, str): 4409 return obj.decode('utf8') 4410 elif not isinstance(obj, unicode): 4411 return unicode(obj) 4412 return obj
4413
4414 - def id_query(self, table):
4415 return table._id > 0
4416
4417 - def represent(self, obj, fieldtype):
4418 field_is_type = fieldtype.startswith 4419 if isinstance(obj, CALLABLETYPES): 4420 obj = obj() 4421 if isinstance(fieldtype, SQLCustomType): 4422 return fieldtype.encoder(obj) 4423 if isinstance(obj, (Expression, Field)): 4424 raise SyntaxError("non supported on GAE") 4425 if self.dbengine == 'google:datastore': 4426 if isinstance(fieldtype, gae.Property): 4427 return obj 4428 is_string = isinstance(fieldtype,str) 4429 is_list = is_string and field_is_type('list:') 4430 if is_list: 4431 if not obj: 4432 obj = [] 4433 if not isinstance(obj, (list, tuple)): 4434 obj = [obj] 4435 if obj == '' and not \ 4436 (is_string and fieldtype[:2] in ['st','te', 'pa','up']): 4437 return None 4438 if not obj is None: 4439 if isinstance(obj, list) and not is_list: 4440 obj = [self.represent(o, fieldtype) for o in obj] 4441 elif fieldtype in ('integer','bigint','id'): 4442 obj = long(obj) 4443 elif fieldtype == 'double': 4444 obj = float(obj) 4445 elif is_string and field_is_type('reference'): 4446 if isinstance(obj, (Row, Reference)): 4447 obj = obj['id'] 4448 obj = long(obj) 4449 elif fieldtype == 'boolean': 4450 if obj and not str(obj)[0].upper() in '0F': 4451 obj = True 4452 else: 4453 obj = False 4454 elif fieldtype == 'date': 4455 if not isinstance(obj, datetime.date): 4456 (y, m, d) = map(int,str(obj).strip().split('-')) 4457 obj = datetime.date(y, m, d) 4458 elif isinstance(obj,datetime.datetime): 4459 (y, m, d) = (obj.year, obj.month, obj.day) 4460 obj = datetime.date(y, m, d) 4461 elif fieldtype == 'time': 4462 if not isinstance(obj, datetime.time): 4463 time_items = map(int,str(obj).strip().split(':')[:3]) 4464 if len(time_items) == 3: 4465 (h, mi, s) = time_items 4466 else: 4467 (h, mi, s) = time_items + [0] 4468 obj = datetime.time(h, mi, s) 4469 elif fieldtype == 'datetime': 4470 if not isinstance(obj, datetime.datetime): 4471 (y, m, d) = map(int,str(obj)[:10].strip().split('-')) 4472 time_items = map(int,str(obj)[11:].strip().split(':')[:3]) 4473 while len(time_items)<3: 4474 time_items.append(0) 4475 (h, mi, s) = time_items 4476 obj = datetime.datetime(y, m, d, h, mi, s) 4477 elif fieldtype == 'blob': 4478 pass 4479 elif fieldtype == 'json': 4480 if isinstance(obj, basestring): 4481 obj = self.to_unicode(obj) 4482 if have_serializers: 4483 obj = serializers.loads_json(obj) 4484 elif simplejson: 4485 obj = simplejson.loads(obj) 4486 else: 4487 raise RuntimeError("missing simplejson") 4488 elif is_string and field_is_type('list:string'): 4489 return map(self.to_unicode,obj) 4490 elif is_list: 4491 return map(int,obj) 4492 else: 4493 obj = self.to_unicode(obj) 4494 return obj
4495
4496 - def _insert(self,table,fields):
4497 return 'insert %s in %s' % (fields, table)
4498
4499 - def _count(self,query,distinct=None):
4500 return 'count %s' % repr(query)
4501
4502 - def _select(self,query,fields,attributes):
4503 return 'select %s where %s' % (repr(fields), repr(query))
4504
4505 - def _delete(self,tablename, query):
4506 return 'delete %s where %s' % (repr(tablename),repr(query))
4507
4508 - def _update(self,tablename,query,fields):
4509 return 'update %s (%s) where %s' % (repr(tablename), 4510 repr(fields),repr(query))
4511
4512 - def commit(self):
4513 """ 4514 remember: no transactions on many NoSQL 4515 """ 4516 pass
4517
4518 - def rollback(self):
4519 """ 4520 remember: no transactions on many NoSQL 4521 """ 4522 pass
4523
4524 - def close_connection(self):
4525 """ 4526 remember: no transactions on many NoSQL 4527 """ 4528 pass
4529 4530 4531 # these functions should never be called!
4532 - def OR(self,first,second): raise SyntaxError("Not supported")
4533 - def AND(self,first,second): raise SyntaxError("Not supported")
4534 - def AS(self,first,second): raise SyntaxError("Not supported")
4535 - def ON(self,first,second): raise SyntaxError("Not supported")
4536 - def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
4537 - def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
4538 - def ADD(self,first,second): raise SyntaxError("Not supported")
4539 - def SUB(self,first,second): raise SyntaxError("Not supported")
4540 - def MUL(self,first,second): raise SyntaxError("Not supported")
4541 - def DIV(self,first,second): raise SyntaxError("Not supported")
4542 - def LOWER(self,first): raise SyntaxError("Not supported")
4543 - def UPPER(self,first): raise SyntaxError("Not supported")
4544 - def EXTRACT(self,first,what): raise SyntaxError("Not supported")
4545 - def LENGTH(self, first): raise SyntaxError("Not supported")
4546 - def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
4547 - def LEFT_JOIN(self): raise SyntaxError("Not supported")
4548 - def RANDOM(self): raise SyntaxError("Not supported")
4549 - def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
4550 - def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
4551 - def ILIKE(self,first,second): raise SyntaxError("Not supported")
4552 - def drop(self,table,mode): raise SyntaxError("Not supported")
4553 - def alias(self,table,alias): raise SyntaxError("Not supported")
4554 - def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
4555 - def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
4556 - def prepare(self,key): raise SyntaxError("Not supported")
4557 - def commit_prepared(self,key): raise SyntaxError("Not supported")
4558 - def rollback_prepared(self,key): raise SyntaxError("Not supported")
4559 - def concat_add(self,table): raise SyntaxError("Not supported")
4560 - def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
4561 - def create_sequence_and_triggers(self, query, table, **args): pass
4562 - def log_execute(self,*a,**b): raise SyntaxError("Not supported")
4563 - def execute(self,*a,**b): raise SyntaxError("Not supported")
4564 - def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
4565 - def lastrowid(self,table): raise SyntaxError("Not supported")
4566 - def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4567
4568 4569 -class GAEF(object):
4570 - def __init__(self,name,op,value,apply):
4571 self.name=name=='id' and '__key__' or name 4572 self.op=op 4573 self.value=value 4574 self.apply=apply
4575 - def __repr__(self):
4576 return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
4577
4578 -class GoogleDatastoreAdapter(NoSQLAdapter):
4579 uploads_in_blob = True 4580 types = {} 4581
4582 - def file_exists(self, filename): pass
4583 - def file_open(self, filename, mode='rb', lock=True): pass
4584 - def file_close(self, fileobj): pass
4585 4586 REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)') 4587
4588 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 4589 credential_decoder=IDENTITY, driver_args={}, 4590 adapter_args={}, do_connect=True, after_connection=None):
4591 self.types.update({ 4592 'boolean': gae.BooleanProperty, 4593 'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)), 4594 'text': gae.TextProperty, 4595 'json': gae.TextProperty, 4596 'password': gae.StringProperty, 4597 'blob': gae.BlobProperty, 4598 'upload': gae.StringProperty, 4599 'integer': gae.IntegerProperty, 4600 'bigint': gae.IntegerProperty, 4601 'float': gae.FloatProperty, 4602 'double': gae.FloatProperty, 4603 'decimal': GAEDecimalProperty, 4604 'date': gae.DateProperty, 4605 'time': gae.TimeProperty, 4606 'datetime': gae.DateTimeProperty, 4607 'id': None, 4608 'reference': gae.IntegerProperty, 4609 'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)), 4610 'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), 4611 'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), 4612 }) 4613 self.db = db 4614 self.uri = uri 4615 self.dbengine = 'google:datastore' 4616 self.folder = folder 4617 db['_lastsql'] = '' 4618 self.db_codec = 'UTF-8' 4619 self._after_connection = after_connection 4620 self.pool_size = 0 4621 match = self.REGEX_NAMESPACE.match(uri) 4622 if match: 4623 namespace_manager.set_namespace(match.group('namespace'))
4624
4625 - def parse_id(self, value, field_type):
4626 return value
4627
4628 - def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
4629 myfields = {} 4630 for field in table: 4631 if isinstance(polymodel,Table) and field.name in polymodel.fields(): 4632 continue 4633 attr = {} 4634 if isinstance(field.custom_qualifier, dict): 4635 #this is custom properties to add to the GAE field declartion 4636 attr = field.custom_qualifier 4637 field_type = field.type 4638 if isinstance(field_type, SQLCustomType): 4639 ftype = self.types[field_type.native or field_type.type](**attr) 4640 elif isinstance(field_type, gae.Property): 4641 ftype = field_type 4642 elif field_type.startswith('id'): 4643 continue 4644 elif field_type.startswith('decimal'): 4645 precision, scale = field_type[7:].strip('()').split(',') 4646 precision = int(precision) 4647 scale = int(scale) 4648 ftype = GAEDecimalProperty(precision, scale, **attr) 4649 elif field_type.startswith('reference'): 4650 if field.notnull: 4651 attr = dict(required=True) 4652 referenced = field_type[10:].strip() 4653 ftype = self.types[field_type[:9]](referenced, **attr) 4654 elif field_type.startswith('list:reference'): 4655 if field.notnull: 4656 attr['required'] = True 4657 referenced = field_type[15:].strip() 4658 ftype = self.types[field_type[:14]](**attr) 4659 elif field_type.startswith('list:'): 4660 ftype = self.types[field_type](**attr) 4661 elif not field_type in self.types\ 4662 or not self.types[field_type]: 4663 raise SyntaxError('Field: unknown field type: %s' % field_type) 4664 else: 4665 ftype = self.types[field_type](**attr) 4666 myfields[field.name] = ftype 4667 if not polymodel: 4668 table._tableobj = classobj(table._tablename, (gae.Model, ), myfields) 4669 elif polymodel==True: 4670 table._tableobj = classobj(table._tablename, (PolyModel, ), myfields) 4671 elif isinstance(polymodel,Table): 4672 table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields) 4673 else: 4674 raise SyntaxError("polymodel must be None, True, a table or a tablename") 4675 return None
4676
4677 - def expand(self,expression,field_type=None):
4678 if isinstance(expression,Field): 4679 if expression.type in ('text', 'blob', 'json'): 4680 raise SyntaxError('AppEngine does not index by: %s' % expression.type) 4681 return expression.name 4682 elif isinstance(expression, (Expression, Query)): 4683 if not expression.second is None: 4684 return expression.op(expression.first, expression.second) 4685 elif not expression.first is None: 4686 return expression.op(expression.first) 4687 else: 4688 return expression.op() 4689 elif field_type: 4690 return self.represent(expression,field_type) 4691 elif isinstance(expression,(list,tuple)): 4692 return ','.join([self.represent(item,field_type) for item in expression]) 4693 else: 4694 return str(expression)
4695 4696 ### TODO from gql.py Expression
4697 - def AND(self,first,second):
4698 a = self.expand(first) 4699 b = self.expand(second) 4700 if b[0].name=='__key__' and a[0].name!='__key__': 4701 return b+a 4702 return a+b
4703
4704 - def EQ(self,first,second=None):
4705 if isinstance(second, Key): 4706 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 4707 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
4708
4709 - def NE(self,first,second=None):
4710 if first.type != 'id': 4711 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 4712 else: 4713 if not second is None: 4714 second = Key.from_path(first._tablename, long(second)) 4715 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
4716
4717 - def LT(self,first,second=None):
4718 if first.type != 'id': 4719 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 4720 else: 4721 second = Key.from_path(first._tablename, long(second)) 4722 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
4723
4724 - def LE(self,first,second=None):
4725 if first.type != 'id': 4726 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 4727 else: 4728 second = Key.from_path(first._tablename, long(second)) 4729 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
4730
4731 - def GT(self,first,second=None):
4732 if first.type != 'id' or second==0 or second == '0': 4733 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 4734 else: 4735 second = Key.from_path(first._tablename, long(second)) 4736 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
4737
4738 - def GE(self,first,second=None):
4739 if first.type != 'id': 4740 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 4741 else: 4742 second = Key.from_path(first._tablename, long(second)) 4743 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
4744
4745 - def INVERT(self,first):
4746 return '-%s' % first.name
4747
4748 - def COMMA(self,first,second):
4749 return '%s, %s' % (self.expand(first),self.expand(second))
4750
4751 - def BELONGS(self,first,second=None):
4752 if not isinstance(second,(list, tuple)): 4753 raise SyntaxError("Not supported") 4754 if first.type != 'id': 4755 return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] 4756 else: 4757 second = [Key.from_path(first._tablename, int(i)) for i in second] 4758 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
4759
4760 - def CONTAINS(self,first,second,case_sensitive=False):
4761 # silently ignoring: GAE can only do case sensitive matches! 4762 if not first.type.startswith('list:'): 4763 raise SyntaxError("Not supported") 4764 return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
4765
4766 - def NOT(self,first):
4767 nops = { self.EQ: self.NE, 4768 self.NE: self.EQ, 4769 self.LT: self.GE, 4770 self.GT: self.LE, 4771 self.LE: self.GT, 4772 self.GE: self.LT} 4773 if not isinstance(first,Query): 4774 raise SyntaxError("Not suported") 4775 nop = nops.get(first.op,None) 4776 if not nop: 4777 raise SyntaxError("Not suported %s" % first.op.__name__) 4778 first.op = nop 4779 return self.expand(first)
4780
4781 - def truncate(self,table,mode):
4782 self.db(self.db._adapter.id_query(table)).delete()
4783
4784 - def select_raw(self,query,fields=None,attributes=None):
4785 db = self.db 4786 fields = fields or [] 4787 attributes = attributes or {} 4788 args_get = attributes.get 4789 new_fields = [] 4790 for item in fields: 4791 if isinstance(item,SQLALL): 4792 new_fields += item._table 4793 else: 4794 new_fields.append(item) 4795 fields = new_fields 4796 if query: 4797 tablename = self.get_table(query) 4798 elif fields: 4799 tablename = fields[0].tablename 4800 query = db._adapter.id_query(fields[0].table) 4801 else: 4802 raise SyntaxError("Unable to determine a tablename") 4803 4804 if query: 4805 if use_common_filters(query): 4806 query = self.common_filter(query,[tablename]) 4807 4808 #tableobj is a GAE Model class (or subclass) 4809 tableobj = db[tablename]._tableobj 4810 filters = self.expand(query) 4811 4812 projection = None 4813 if len(db[tablename].fields) == len(fields): 4814 #getting all fields, not a projection query 4815 projection = None 4816 elif args_get('projection') == True: 4817 projection = [] 4818 for f in fields: 4819 if f.type in ['text', 'blob', 'json']: 4820 raise SyntaxError( 4821 "text and blob field types not allowed in projection queries") 4822 else: 4823 projection.append(f.name) 4824 elif args_get('filterfields') == True: 4825 projection = [] 4826 for f in fields: 4827 projection.append(f.name) 4828 4829 # real projection's can't include 'id'. 4830 # it will be added to the result later 4831 query_projection = [ 4832 p for p in projection if \ 4833 p != db[tablename]._id.name] if projection and \ 4834 args_get('projection') == True\ 4835 else None 4836 4837 cursor = None 4838 if isinstance(args_get('reusecursor'), str): 4839 cursor = args_get('reusecursor') 4840 items = gae.Query(tableobj, projection=query_projection, 4841 cursor=cursor) 4842 4843 for filter in filters: 4844 if args_get('projection') == True and \ 4845 filter.name in query_projection and \ 4846 filter.op in ['=', '<=', '>=']: 4847 raise SyntaxError( 4848 "projection fields cannot have equality filters") 4849 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 4850 continue 4851 elif filter.name=='__key__' and filter.op=='=': 4852 if filter.value==0: 4853 items = [] 4854 elif isinstance(filter.value, Key): 4855 # key qeuries return a class instance, 4856 # can't use projection 4857 # extra values will be ignored in post-processing later 4858 item = tableobj.get(filter.value) 4859 items = (item and [item]) or [] 4860 else: 4861 # key qeuries return a class instance, 4862 # can't use projection 4863 # extra values will be ignored in post-processing later 4864 item = tableobj.get_by_id(filter.value) 4865 items = (item and [item]) or [] 4866 elif isinstance(items,list): # i.e. there is a single record! 4867 items = [i for i in items if filter.apply( 4868 getattr(item,filter.name),filter.value)] 4869 else: 4870 if filter.name=='__key__' and filter.op != 'in': 4871 items.order('__key__') 4872 items = items.filter('%s %s' % (filter.name,filter.op), 4873 filter.value) 4874 if not isinstance(items,list): 4875 if args_get('left', None): 4876 raise SyntaxError('Set: no left join in appengine') 4877 if args_get('groupby', None): 4878 raise SyntaxError('Set: no groupby in appengine') 4879 orderby = args_get('orderby', False) 4880 if orderby: 4881 ### THIS REALLY NEEDS IMPROVEMENT !!! 4882 if isinstance(orderby, (list, tuple)): 4883 orderby = xorify(orderby) 4884 if isinstance(orderby,Expression): 4885 orderby = self.expand(orderby) 4886 orders = orderby.split(', ') 4887 for order in orders: 4888 order={'-id':'-__key__','id':'__key__'}.get(order,order) 4889 items = items.order(order) 4890 if args_get('limitby', None): 4891 (lmin, lmax) = attributes['limitby'] 4892 (limit, offset) = (lmax - lmin, lmin) 4893 rows = items.fetch(limit,offset=offset) 4894 #cursor is only useful if there was a limit and we didn't return 4895 # all results 4896 if args_get('reusecursor'): 4897 db['_lastcursor'] = items.cursor() 4898 items = rows 4899 return (items, tablename, projection or db[tablename].fields)
4900
4901 - def select(self,query,fields,attributes):
4902 """ 4903 This is the GAE version of select. some notes to consider: 4904 - db['_lastsql'] is not set because there is not SQL statement string 4905 for a GAE query 4906 - 'nativeRef' is a magical fieldname used for self references on GAE 4907 - optional attribute 'projection' when set to True will trigger 4908 use of the GAE projection queries. note that there are rules for 4909 what is accepted imposed by GAE: each field must be indexed, 4910 projection queries cannot contain blob or text fields, and you 4911 cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection 4912 - optional attribute 'filterfields' when set to True web2py will only 4913 parse the explicitly listed fields into the Rows object, even though 4914 all fields are returned in the query. This can be used to reduce 4915 memory usage in cases where true projection queries are not 4916 usable. 4917 - optional attribute 'reusecursor' allows use of cursor with queries 4918 that have the limitby attribute. Set the attribute to True for the 4919 first query, set it to the value of db['_lastcursor'] to continue 4920 a previous query. The user must save the cursor value between 4921 requests, and the filters must be identical. It is up to the user 4922 to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors 4923 """ 4924 4925 (items, tablename, fields) = self.select_raw(query,fields,attributes) 4926 # self.db['_lastsql'] = self._select(query,fields,attributes) 4927 rows = [[(t==self.db[tablename]._id.name and item) or \ 4928 (t=='nativeRef' and item) or getattr(item, t) \ 4929 for t in fields] for item in items] 4930 colnames = ['%s.%s' % (tablename, t) for t in fields] 4931 processor = attributes.get('processor',self.parse) 4932 return processor(rows,fields,colnames,False)
4933
4934 - def count(self,query,distinct=None,limit=None):
4935 if distinct: 4936 raise RuntimeError("COUNT DISTINCT not supported") 4937 (items, tablename, fields) = self.select_raw(query) 4938 # self.db['_lastsql'] = self._count(query) 4939 try: 4940 return len(items) 4941 except TypeError: 4942 return items.count(limit=limit)
4943
4944 - def delete(self,tablename, query):
4945 """ 4946 This function was changed on 2010-05-04 because according to 4947 http://code.google.com/p/googleappengine/issues/detail?id=3119 4948 GAE no longer supports deleting more than 1000 records. 4949 """ 4950 # self.db['_lastsql'] = self._delete(tablename,query) 4951 (items, tablename, fields) = self.select_raw(query) 4952 # items can be one item or a query 4953 if not isinstance(items,list): 4954 #use a keys_only query to ensure that this runs as a datastore 4955 # small operations 4956 leftitems = items.fetch(1000, keys_only=True) 4957 counter = 0 4958 while len(leftitems): 4959 counter += len(leftitems) 4960 gae.delete(leftitems) 4961 leftitems = items.fetch(1000, keys_only=True) 4962 else: 4963 counter = len(items) 4964 gae.delete(items) 4965 return counter
4966
4967 - def update(self,tablename,query,update_fields):
4968 # self.db['_lastsql'] = self._update(tablename,query,update_fields) 4969 (items, tablename, fields) = self.select_raw(query) 4970 counter = 0 4971 for item in items: 4972 for field, value in update_fields: 4973 setattr(item, field.name, self.represent(value,field.type)) 4974 item.put() 4975 counter += 1 4976 LOGGER.info(str(counter)) 4977 return counter
4978
4979 - def insert(self,table,fields):
4980 dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) 4981 # table._db['_lastsql'] = self._insert(table,fields) 4982 tmp = table._tableobj(**dfields) 4983 tmp.put() 4984 rid = Reference(tmp.key().id()) 4985 (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key()) 4986 return rid
4987
4988 - def bulk_insert(self,table,items):
4989 parsed_items = [] 4990 for item in items: 4991 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 4992 parsed_items.append(table._tableobj(**dfields)) 4993 gae.put(parsed_items) 4994 return True
4995
4996 -def uuid2int(uuidv):
4997 return uuid.UUID(uuidv).int
4998
4999 -def int2uuid(n):
5000 return str(uuid.UUID(int=n))
5001
5002 -class CouchDBAdapter(NoSQLAdapter):
5003 drivers = ('couchdb',) 5004 5005 uploads_in_blob = True 5006 types = { 5007 'boolean': bool, 5008 'string': str, 5009 'text': str, 5010 'json': str, 5011 'password': str, 5012 'blob': str, 5013 'upload': str, 5014 'integer': long, 5015 'bigint': long, 5016 'float': float, 5017 'double': float, 5018 'date': datetime.date, 5019 'time': datetime.time, 5020 'datetime': datetime.datetime, 5021 'id': long, 5022 'reference': long, 5023 'list:string': list, 5024 'list:integer': list, 5025 'list:reference': list, 5026 } 5027
5028 - def file_exists(self, filename): pass
5029 - def file_open(self, filename, mode='rb', lock=True): pass
5030 - def file_close(self, fileobj): pass
5031
5032 - def expand(self,expression,field_type=None):
5033 if isinstance(expression,Field): 5034 if expression.type=='id': 5035 return "%s._id" % expression.tablename 5036 return BaseAdapter.expand(self,expression,field_type)
5037
5038 - def AND(self,first,second):
5039 return '(%s && %s)' % (self.expand(first),self.expand(second))
5040
5041 - def OR(self,first,second):
5042 return '(%s || %s)' % (self.expand(first),self.expand(second))
5043
5044 - def EQ(self,first,second):
5045 if second is None: 5046 return '(%s == null)' % self.expand(first) 5047 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
5048
5049 - def NE(self,first,second):
5050 if second is None: 5051 return '(%s != null)' % self.expand(first) 5052 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
5053
5054 - def COMMA(self,first,second):
5055 return '%s + %s' % (self.expand(first),self.expand(second))
5056
5057 - def represent(self, obj, fieldtype):
5058 value = NoSQLAdapter.represent(self, obj, fieldtype) 5059 if fieldtype=='id': 5060 return repr(str(long(value))) 5061 elif fieldtype in ('date','time','datetime','boolean'): 5062 return serializers.json(value) 5063 return repr(not isinstance(value,unicode) and value \ 5064 or value and value.encode('utf8'))
5065
5066 - def __init__(self,db,uri='couchdb://127.0.0.1:5984', 5067 pool_size=0,folder=None,db_codec ='UTF-8', 5068 credential_decoder=IDENTITY, driver_args={}, 5069 adapter_args={}, do_connect=True, after_connection=None):
5070 self.db = db 5071 self.uri = uri 5072 if do_connect: self.find_driver(adapter_args) 5073 self.dbengine = 'couchdb' 5074 self.folder = folder 5075 db['_lastsql'] = '' 5076 self.db_codec = 'UTF-8' 5077 self._after_connection = after_connection 5078 self.pool_size = pool_size 5079 5080 url='http://'+uri[10:] 5081 def connector(url=url,driver_args=driver_args): 5082 return self.driver.Server(url,**driver_args)
5083 self.reconnect(connector,cursor=False)
5084
5085 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
5086 if migrate: 5087 try: 5088 self.connection.create(table._tablename) 5089 except: 5090 pass
5091
5092 - def insert(self,table,fields):
5093 id = uuid2int(web2py_uuid()) 5094 ctable = self.connection[table._tablename] 5095 values = dict((k.name,self.represent(v,k.type)) for k,v in fields) 5096 values['_id'] = str(id) 5097 ctable.save(values) 5098 return id
5099
5100 - def _select(self,query,fields,attributes):
5101 if not isinstance(query,Query): 5102 raise SyntaxError("Not Supported") 5103 for key in set(attributes.keys())-SELECT_ARGS: 5104 raise SyntaxError('invalid select attribute: %s' % key) 5105 new_fields=[] 5106 for item in fields: 5107 if isinstance(item,SQLALL): 5108 new_fields += item._table 5109 else: 5110 new_fields.append(item) 5111 def uid(fd): 5112 return fd=='id' and '_id' or fd
5113 def get(row,fd): 5114 return fd=='id' and long(row['_id']) or row.get(fd,None) 5115 fields = new_fields 5116 tablename = self.get_table(query) 5117 fieldnames = [f.name for f in (fields or self.db[tablename])] 5118 colnames = ['%s.%s' % (tablename,k) for k in fieldnames] 5119 fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) 5120 fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\ 5121 dict(t=tablename, 5122 query=self.expand(query), 5123 order='%s._id' % tablename, 5124 fields=fields) 5125 return fn, colnames 5126
5127 - def select(self,query,fields,attributes):
5128 if not isinstance(query,Query): 5129 raise SyntaxError("Not Supported") 5130 fn, colnames = self._select(query,fields,attributes) 5131 tablename = colnames[0].split('.')[0] 5132 ctable = self.connection[tablename] 5133 rows = [cols['value'] for cols in ctable.query(fn)] 5134 processor = attributes.get('processor',self.parse) 5135 return processor(rows,fields,colnames,False)
5136
5137 - def delete(self,tablename,query):
5138 if not isinstance(query,Query): 5139 raise SyntaxError("Not Supported") 5140 if query.first.type=='id' and query.op==self.EQ: 5141 id = query.second 5142 tablename = query.first.tablename 5143 assert(tablename == query.first.tablename) 5144 ctable = self.connection[tablename] 5145 try: 5146 del ctable[str(id)] 5147 return 1 5148 except couchdb.http.ResourceNotFound: 5149 return 0 5150 else: 5151 tablename = self.get_table(query) 5152 rows = self.select(query,[self.db[tablename]._id],{}) 5153 ctable = self.connection[tablename] 5154 for row in rows: 5155 del ctable[str(row.id)] 5156 return len(rows)
5157
5158 - def update(self,tablename,query,fields):
5159 if not isinstance(query,Query): 5160 raise SyntaxError("Not Supported") 5161 if query.first.type=='id' and query.op==self.EQ: 5162 id = query.second 5163 tablename = query.first.tablename 5164 ctable = self.connection[tablename] 5165 try: 5166 doc = ctable[str(id)] 5167 for key,value in fields: 5168 doc[key.name] = self.represent(value,self.db[tablename][key.name].type) 5169 ctable.save(doc) 5170 return 1 5171 except couchdb.http.ResourceNotFound: 5172 return 0 5173 else: 5174 tablename = self.get_table(query) 5175 rows = self.select(query,[self.db[tablename]._id],{}) 5176 ctable = self.connection[tablename] 5177 table = self.db[tablename] 5178 for row in rows: 5179 doc = ctable[str(row.id)] 5180 for key,value in fields: 5181 doc[key.name] = self.represent(value,table[key.name].type) 5182 ctable.save(doc) 5183 return len(rows)
5184
5185 - def count(self,query,distinct=None):
5186 if distinct: 5187 raise RuntimeError("COUNT DISTINCT not supported") 5188 if not isinstance(query,Query): 5189 raise SyntaxError("Not Supported") 5190 tablename = self.get_table(query) 5191 rows = self.select(query,[self.db[tablename]._id],{}) 5192 return len(rows)
5193
5194 -def cleanup(text):
5195 """ 5196 validates that the given text is clean: only contains [0-9a-zA-Z_] 5197 """ 5198 if not REGEX_ALPHANUMERIC.match(text): 5199 raise SyntaxError('invalid table or field name: %s' % text) 5200 return text
5201
5202 -class MongoDBAdapter(NoSQLAdapter):
5203 native_json = True 5204 drivers = ('pymongo',) 5205 5206 uploads_in_blob = True 5207 5208 types = { 5209 'boolean': bool, 5210 'string': str, 5211 'text': str, 5212 'json': str, 5213 'password': str, 5214 'blob': str, 5215 'upload': str, 5216 'integer': long, 5217 'bigint': long, 5218 'float': float, 5219 'double': float, 5220 'date': datetime.date, 5221 'time': datetime.time, 5222 'datetime': datetime.datetime, 5223 'id': long, 5224 'reference': long, 5225 'list:string': list, 5226 'list:integer': list, 5227 'list:reference': list, 5228 } 5229 5230 error_messages = {"javascript_needed": "This must yet be replaced" + 5231 " with javascript in order to work."} 5232
5233 - def __init__(self,db,uri='mongodb://127.0.0.1:5984/db', 5234 pool_size=0, folder=None, db_codec ='UTF-8', 5235 credential_decoder=IDENTITY, driver_args={}, 5236 adapter_args={}, do_connect=True, after_connection=None):
5237 5238 self.db = db 5239 self.uri = uri 5240 if do_connect: self.find_driver(adapter_args) 5241 import random 5242 from bson.objectid import ObjectId 5243 from bson.son import SON 5244 import pymongo.uri_parser 5245 5246 m = pymongo.uri_parser.parse_uri(uri) 5247 5248 self.SON = SON 5249 self.ObjectId = ObjectId 5250 self.random = random 5251 5252 self.dbengine = 'mongodb' 5253 self.folder = folder 5254 db['_lastsql'] = '' 5255 self.db_codec = 'UTF-8' 5256 self._after_connection = after_connection 5257 self.pool_size = pool_size 5258 #this is the minimum amount of replicates that it should wait 5259 # for on insert/update 5260 self.minimumreplication = adapter_args.get('minimumreplication',0) 5261 # by default all inserts and selects are performand asynchronous, 5262 # but now the default is 5263 # synchronous, except when overruled by either this default or 5264 # function parameter 5265 self.safe = adapter_args.get('safe',True) 5266 5267 if isinstance(m,tuple): 5268 m = {"database" : m[1]} 5269 if m.get('database')==None: 5270 raise SyntaxError("Database is required!") 5271 5272 def connector(uri=self.uri,m=m): 5273 # Connection() is deprecated 5274 if hasattr(self.driver, "MongoClient"): 5275 Connection = self.driver.MongoClient 5276 else: 5277 Connection = self.driver.Connection 5278 return Connection(uri)[m.get('database')]
5279 5280 self.reconnect(connector,cursor=False)
5281
5282 - def object_id(self, arg=None):
5283 """ Convert input to a valid Mongodb ObjectId instance 5284 5285 self.object_id("<random>") -> ObjectId (not unique) instance """ 5286 if not arg: 5287 arg = 0 5288 if isinstance(arg, basestring): 5289 # we assume an integer as default input 5290 rawhex = len(arg.replace("0x", "").replace("L", "")) == 24 5291 if arg.isdigit() and (not rawhex): 5292 arg = int(arg) 5293 elif arg == "<random>": 5294 arg = int("0x%sL" % \ 5295 "".join([self.random.choice("0123456789abcdef") \ 5296 for x in range(24)]), 0) 5297 elif arg.isalnum(): 5298 if not arg.startswith("0x"): 5299 arg = "0x%s" % arg 5300 try: 5301 arg = int(arg, 0) 5302 except ValueError, e: 5303 raise ValueError( 5304 "invalid objectid argument string: %s" % e) 5305 else: 5306 raise ValueError("Invalid objectid argument string. " + 5307 "Requires an integer or base 16 value") 5308 elif isinstance(arg, self.ObjectId): 5309 return arg 5310 5311 if not isinstance(arg, (int, long)): 5312 raise TypeError("object_id argument must be of type " + 5313 "ObjectId or an objectid representable integer") 5314 if arg == 0: 5315 hexvalue = "".zfill(24) 5316 else: 5317 hexvalue = hex(arg)[2:].replace("L", "") 5318 return self.ObjectId(hexvalue)
5319
5320 - def parse_reference(self, value, field_type):
5321 # here we have to check for ObjectID before base parse 5322 if isinstance(value, self.ObjectId): 5323 value = long(str(value), 16) 5324 return super(MongoDBAdapter, 5325 self).parse_reference(value, field_type)
5326
5327 - def parse_id(self, value, field_type):
5328 if isinstance(value, self.ObjectId): 5329 value = long(str(value), 16) 5330 return super(MongoDBAdapter, 5331 self).parse_id(value, field_type)
5332
5333 - def represent(self, obj, fieldtype):
5334 # the base adatpter does not support MongoDB ObjectId 5335 if isinstance(obj, self.ObjectId): 5336 value = obj 5337 else: 5338 value = NoSQLAdapter.represent(self, obj, fieldtype) 5339 # reference types must be convert to ObjectID 5340 if fieldtype =='date': 5341 if value == None: 5342 return value 5343 # this piece of data can be stripped off based on the fieldtype 5344 t = datetime.time(0, 0, 0) 5345 # mongodb doesn't has a date object and so it must datetime, 5346 # string or integer 5347 return datetime.datetime.combine(value, t) 5348 elif fieldtype == 'time': 5349 if value == None: 5350 return value 5351 # this piece of data can be stripped of based on the fieldtype 5352 d = datetime.date(2000, 1, 1) 5353 # mongodb doesn't has a time object and so it must datetime, 5354 # string or integer 5355 return datetime.datetime.combine(d, value) 5356 elif (isinstance(fieldtype, basestring) and 5357 fieldtype.startswith('list:')): 5358 if fieldtype.startswith('list:reference'): 5359 newval = [] 5360 for v in value: 5361 newval.append(self.object_id(v)) 5362 return newval 5363 return value 5364 elif ((isinstance(fieldtype, basestring) and 5365 fieldtype.startswith("reference")) or 5366 (isinstance(fieldtype, Table)) or fieldtype=="id"): 5367 value = self.object_id(value) 5368 return value
5369
5370 - def create_table(self, table, migrate=True, fake_migrate=False, 5371 polymodel=None, isCapped=False):
5372 if isCapped: 5373 raise RuntimeError("Not implemented")
5374
5375 - def count(self, query, distinct=None, snapshot=True):
5376 if distinct: 5377 raise RuntimeError("COUNT DISTINCT not supported") 5378 if not isinstance(query,Query): 5379 raise SyntaxError("Not Supported") 5380 tablename = self.get_table(query) 5381 return long(self.select(query,[self.db[tablename]._id], {}, 5382 count=True,snapshot=snapshot)['count'])
5383 # Maybe it would be faster if we just implemented the pymongo 5384 # .count() function which is probably quicker? 5385 # therefor call __select() connection[table].find(query).count() 5386 # Since this will probably reduce the return set? 5387
5388 - def expand(self, expression, field_type=None):
5389 if isinstance(expression, Query): 5390 # any query using 'id':= 5391 # set name as _id (as per pymongo/mongodb primary key) 5392 # convert second arg to an objectid field 5393 # (if its not already) 5394 # if second arg is 0 convert to objectid 5395 if isinstance(expression.first,Field) and \ 5396 ((expression.first.type == 'id') or \ 5397 ("reference" in expression.first.type)): 5398 if expression.first.type == 'id': 5399 expression.first.name = '_id' 5400 # cast to Mongo ObjectId 5401 if isinstance(expression.second, (tuple, list, set)): 5402 expression.second = [self.object_id(item) for 5403 item in expression.second] 5404 else: 5405 expression.second = self.object_id(expression.second) 5406 result = expression.op(expression.first, expression.second) 5407 5408 if isinstance(expression, Field): 5409 if expression.type=='id': 5410 result = "_id" 5411 else: 5412 result = expression.name 5413 elif isinstance(expression, (Expression, Query)): 5414 if not expression.second is None: 5415 result = expression.op(expression.first, expression.second) 5416 elif not expression.first is None: 5417 result = expression.op(expression.first) 5418 elif not isinstance(expression.op, str): 5419 result = expression.op() 5420 else: 5421 result = expression.op 5422 elif field_type: 5423 result = self.represent(expression,field_type) 5424 elif isinstance(expression,(list,tuple)): 5425 result = ','.join(self.represent(item,field_type) for 5426 item in expression) 5427 else: 5428 result = expression 5429 return result
5430
5431 - def drop(self, table, mode=''):
5432 ctable = self.connection[table._tablename] 5433 ctable.drop()
5434
5435 - def truncate(self, table, mode, safe=None):
5436 if safe == None: 5437 safe=self.safe 5438 ctable = self.connection[table._tablename] 5439 ctable.remove(None, safe=True)
5440
5441 - def _select(self, query, fields, attributes):
5442 if 'for_update' in attributes: 5443 logging.warn('mongodb does not support for_update') 5444 for key in set(attributes.keys())-set(('limitby', 5445 'orderby','for_update')): 5446 if attributes[key]!=None: 5447 logging.warn('select attribute not implemented: %s' % key) 5448 5449 new_fields=[] 5450 mongosort_list = [] 5451 5452 # try an orderby attribute 5453 orderby = attributes.get('orderby', False) 5454 limitby = attributes.get('limitby', False) 5455 # distinct = attributes.get('distinct', False) 5456 if orderby: 5457 if isinstance(orderby, (list, tuple)): 5458 orderby = xorify(orderby) 5459 5460 # !!!! need to add 'random' 5461 for f in self.expand(orderby).split(','): 5462 if f.startswith('-'): 5463 mongosort_list.append((f[1:], -1)) 5464 else: 5465 mongosort_list.append((f, 1)) 5466 if limitby: 5467 limitby_skip, limitby_limit = limitby[0], int(limitby[1]) 5468 else: 5469 limitby_skip = limitby_limit = 0 5470 5471 mongofields_dict = self.SON() 5472 mongoqry_dict = {} 5473 for item in fields: 5474 if isinstance(item, SQLALL): 5475 new_fields += item._table 5476 else: 5477 new_fields.append(item) 5478 fields = new_fields 5479 if isinstance(query,Query): 5480 tablename = self.get_table(query) 5481 elif len(fields) != 0: 5482 tablename = fields[0].tablename 5483 else: 5484 raise SyntaxError("The table name could not be found in " + 5485 "the query nor from the select statement.") 5486 mongoqry_dict = self.expand(query) 5487 fields = fields or self.db[tablename] 5488 for field in fields: 5489 mongofields_dict[field.name] = 1 5490 5491 return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \ 5492 limitby_limit, limitby_skip
5493
5494 - def select(self, query, fields, attributes, count=False, 5495 snapshot=False):
5496 # TODO: support joins 5497 tablename, mongoqry_dict, mongofields_dict, mongosort_list, \ 5498 limitby_limit, limitby_skip = self._select(query, fields, attributes) 5499 ctable = self.connection[tablename] 5500 5501 if count: 5502 return {'count' : ctable.find( 5503 mongoqry_dict, mongofields_dict, 5504 skip=limitby_skip, limit=limitby_limit, 5505 sort=mongosort_list, snapshot=snapshot).count()} 5506 else: 5507 # pymongo cursor object 5508 mongo_list_dicts = ctable.find(mongoqry_dict, 5509 mongofields_dict, skip=limitby_skip, 5510 limit=limitby_limit, sort=mongosort_list, 5511 snapshot=snapshot) 5512 rows = [] 5513 # populate row in proper order 5514 # Here we replace ._id with .id to follow the standard naming 5515 colnames = [] 5516 newnames = [] 5517 for field in fields: 5518 colname = str(field) 5519 colnames.append(colname) 5520 tablename, fieldname = colname.split(".") 5521 if fieldname == "_id": 5522 # Mongodb reserved uuid key 5523 field.name = "id" 5524 newnames.append(".".join((tablename, field.name))) 5525 5526 for record in mongo_list_dicts: 5527 row=[] 5528 for colname in colnames: 5529 tablename, fieldname = colname.split(".") 5530 # switch to Mongo _id uuids for retrieving 5531 # record id's 5532 if fieldname == "id": fieldname = "_id" 5533 if fieldname in record: 5534 value = record[fieldname] 5535 else: 5536 value = None 5537 row.append(value) 5538 rows.append(row) 5539 5540 processor = attributes.get('processor', self.parse) 5541 result = processor(rows, fields, newnames, False) 5542 return result
5543
5544 - def _insert(self, table, fields):
5545 values = dict() 5546 for k, v in fields: 5547 if not k.name in ["id", "safe"]: 5548 fieldname = k.name 5549 fieldtype = table[k.name].type 5550 values[fieldname] = self.represent(v, fieldtype) 5551 return values
5552 5553 # Safe determines whether a asynchronious request is done or a 5554 # synchronious action is done 5555 # For safety, we use by default synchronous requests
5556 - def insert(self, table, fields, safe=None):
5557 if safe==None: 5558 safe = self.safe 5559 ctable = self.connection[table._tablename] 5560 values = self._insert(table, fields) 5561 ctable.insert(values, safe=safe) 5562 return long(str(values['_id']), 16)
5563 5564 #this function returns a dict with the where clause and update fields
5565 - def _update(self, tablename, query, fields):
5566 if not isinstance(query, Query): 5567 raise SyntaxError("Not Supported") 5568 filter = None 5569 if query: 5570 filter = self.expand(query) 5571 # do not try to update id fields to avoid backend errors 5572 modify = {'$set': dict((k.name, self.represent(v, k.type)) for 5573 k, v in fields if (not k.name in ("_id", "id")))} 5574 return modify, filter
5575
5576 - def update(self, tablename, query, fields, safe=None):
5577 if safe == None: 5578 safe = self.safe 5579 # return amount of adjusted rows or zero, but no exceptions 5580 # @ related not finding the result 5581 if not isinstance(query, Query): 5582 raise RuntimeError("Not implemented") 5583 amount = self.count(query, False) 5584 modify, filter = self._update(tablename, query, fields) 5585 try: 5586 result = self.connection[tablename].update(filter, 5587 modify, multi=True, safe=safe) 5588 if safe: 5589 try: 5590 # if result count is available fetch it 5591 return result["n"] 5592 except (KeyError, AttributeError, TypeError): 5593 return amount 5594 else: 5595 return amount 5596 except Exception, e: 5597 # TODO Reverse update query to verifiy that the query succeded 5598 raise RuntimeError("uncaught exception when updating rows: %s" % e)
5599
5600 - def _delete(self, tablename, query):
5601 if not isinstance(query, Query): 5602 raise RuntimeError("query type %s is not supported" % \ 5603 type(query)) 5604 return self.expand(query)
5605
5606 - def delete(self, tablename, query, safe=None):
5607 if safe is None: 5608 safe = self.safe 5609 amount = 0 5610 amount = self.count(query, False) 5611 filter = self._delete(tablename, query) 5612 self.connection[tablename].remove(filter, safe=safe) 5613 return amount
5614
5615 - def bulk_insert(self, table, items):
5616 return [self.insert(table,item) for item in items]
5617 5618 ## OPERATORS
5619 - def INVERT(self, first):
5620 #print "in invert first=%s" % first 5621 return '-%s' % self.expand(first)
5622 5623 # TODO This will probably not work:(
5624 - def NOT(self, first):
5625 result = {} 5626 result["$not"] = self.expand(first) 5627 return result
5628
5629 - def AND(self,first,second):
5630 f = self.expand(first) 5631 s = self.expand(second) 5632 f.update(s) 5633 return f
5634
5635 - def OR(self,first,second):
5636 # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) 5637 result = {} 5638 f = self.expand(first) 5639 s = self.expand(second) 5640 result['$or'] = [f,s] 5641 return result
5642
5643 - def BELONGS(self, first, second):
5644 if isinstance(second, str): 5645 return {self.expand(first) : {"$in" : [ second[:-1]]} } 5646 elif second==[] or second==() or second==set(): 5647 return {1:0} 5648 items = [self.expand(item, first.type) for item in second] 5649 return {self.expand(first) : {"$in" : items} }
5650
5651 - def EQ(self,first,second):
5652 result = {} 5653 result[self.expand(first)] = self.expand(second) 5654 return result
5655
5656 - def NE(self, first, second=None):
5657 result = {} 5658 result[self.expand(first)] = {'$ne': self.expand(second)} 5659 return result
5660
5661 - def LT(self,first,second=None):
5662 if second is None: 5663 raise RuntimeError("Cannot compare %s < None" % first) 5664 result = {} 5665 result[self.expand(first)] = {'$lt': self.expand(second)} 5666 return result
5667
5668 - def LE(self,first,second=None):
5669 if second is None: 5670 raise RuntimeError("Cannot compare %s <= None" % first) 5671 result = {} 5672 result[self.expand(first)] = {'$lte': self.expand(second)} 5673 return result
5674
5675 - def GT(self,first,second):
5676 result = {} 5677 result[self.expand(first)] = {'$gt': self.expand(second)} 5678 return result
5679
5680 - def GE(self,first,second=None):
5681 if second is None: 5682 raise RuntimeError("Cannot compare %s >= None" % first) 5683 result = {} 5684 result[self.expand(first)] = {'$gte': self.expand(second)} 5685 return result
5686
5687 - def ADD(self, first, second):
5688 raise NotImplementedError(self.error_messages["javascript_needed"]) 5689 return '%s + %s' % (self.expand(first), 5690 self.expand(second, first.type))
5691
5692 - def SUB(self, first, second):
5693 raise NotImplementedError(self.error_messages["javascript_needed"]) 5694 return '(%s - %s)' % (self.expand(first), 5695 self.expand(second, first.type))
5696
5697 - def MUL(self, first, second):
5698 raise NotImplementedError(self.error_messages["javascript_needed"]) 5699 return '(%s * %s)' % (self.expand(first), 5700 self.expand(second, first.type))
5701
5702 - def DIV(self, first, second):
5703 raise NotImplementedError(self.error_messages["javascript_needed"]) 5704 return '(%s / %s)' % (self.expand(first), 5705 self.expand(second, first.type))
5706
5707 - def MOD(self, first, second):
5708 raise NotImplementedError(self.error_messages["javascript_needed"]) 5709 return '(%s %% %s)' % (self.expand(first), 5710 self.expand(second, first.type))
5711
5712 - def AS(self, first, second):
5713 raise NotImplementedError(self.error_messages["javascript_needed"]) 5714 return '%s AS %s' % (self.expand(first), second)
5715 5716 # We could implement an option that simulates a full featured SQL 5717 # database. But I think the option should be set explicit or 5718 # implemented as another library.
5719 - def ON(self, first, second):
5720 raise NotImplementedError("This is not possible in NoSQL" + 5721 " but can be simulated with a wrapper.") 5722 return '%s ON %s' % (self.expand(first), self.expand(second))
5723 5724 # BLOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCITONS 5725 # WHICH ONE IS BEST? 5726
5727 - def COMMA(self, first, second):
5728 return '%s, %s' % (self.expand(first), self.expand(second))
5729
5730 - def LIKE(self, first, second):
5731 #escaping regex operators? 5732 return {self.expand(first): ('%s' % \ 5733 self.expand(second, 'string').replace('%','/'))}
5734
5735 - def STARTSWITH(self, first, second):
5736 #escaping regex operators? 5737 return {self.expand(first): ('/^%s/' % \ 5738 self.expand(second, 'string'))}
5739
5740 - def ENDSWITH(self, first, second):
5741 #escaping regex operators? 5742 return {self.expand(first): ('/%s^/' % \ 5743 self.expand(second, 'string'))}
5744
5745 - def CONTAINS(self, first, second, case_sensitive=False):
5746 # silently ignore, only case sensitive 5747 # There is a technical difference, but mongodb doesn't support 5748 # that, but the result will be the same 5749 val = second if isinstance(second,self.ObjectId) else \ 5750 {'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"} 5751 return {self.expand(first) : val}
5752
5753 - def LIKE(self, first, second):
5754 import re 5755 return {self.expand(first): {'$regex': \ 5756 re.escape(self.expand(second, 5757 'string')).replace('%','.*')}}
5758 5759 #TODO verify full compatibilty with official SQL Like operator
5760 - def STARTSWITH(self, first, second):
5761 #TODO Solve almost the same problem as with endswith 5762 import re 5763 return {self.expand(first): {'$regex' : '^' + 5764 re.escape(self.expand(second, 5765 'string'))}}
5766 5767 #TODO verify full compatibilty with official SQL Like operator
5768 - def ENDSWITH(self, first, second):
5769 #escaping regex operators? 5770 #TODO if searched for a name like zsa_corbitt and the function 5771 # is endswith('a') then this is also returned. 5772 # Aldo it end with a t 5773 import re 5774 return {self.expand(first): {'$regex': \ 5775 re.escape(self.expand(second, 'string')) + '$'}}
5776 5777 #TODO verify full compatibilty with official oracle contains operator
5778 - def CONTAINS(self, first, second, case_sensitive=False):
5779 # silently ignore, only case sensitive 5780 #There is a technical difference, but mongodb doesn't support 5781 # that, but the result will be the same 5782 #TODO contains operators need to be transformed to Regex 5783 return {self.expand(first) : {'$regex': \ 5784 ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
5785
5786 5787 -class IMAPAdapter(NoSQLAdapter):
5788 drivers = ('imaplib',) 5789 5790 """ IMAP server adapter 5791 5792 This class is intended as an interface with 5793 email IMAP servers to perform simple queries in the 5794 web2py DAL query syntax, so email read, search and 5795 other related IMAP mail services (as those implemented 5796 by brands like Google(r), and Yahoo!(r) 5797 can be managed from web2py applications. 5798 5799 The code uses examples by Yuji Tomita on this post: 5800 http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 5801 and is based in docs for Python imaplib, python email 5802 and email IETF's (i.e. RFC2060 and RFC3501) 5803 5804 This adapter was tested with a small set of operations with Gmail(r). Other 5805 services requests could raise command syntax and response data issues. 5806 5807 It creates its table and field names "statically", 5808 meaning that the developer should leave the table and field 5809 definitions to the DAL instance by calling the adapter's 5810 .define_tables() method. The tables are defined with the 5811 IMAP server mailbox list information. 5812 5813 .define_tables() returns a dictionary mapping dal tablenames 5814 to the server mailbox names with the following structure: 5815 5816 {<tablename>: str <server mailbox name>} 5817 5818 Here is a list of supported fields: 5819 5820 Field Type Description 5821 ################################################################ 5822 uid string 5823 answered boolean Flag 5824 created date 5825 content list:string A list of text or html parts 5826 to string 5827 cc string 5828 bcc string 5829 size integer the amount of octets of the message* 5830 deleted boolean Flag 5831 draft boolean Flag 5832 flagged boolean Flag 5833 sender string 5834 recent boolean Flag 5835 seen boolean Flag 5836 subject string 5837 mime string The mime header declaration 5838 email string The complete RFC822 message** 5839 attachments <type list> Each non text part as dict 5840 encoding string The main detected encoding 5841 5842 *At the application side it is measured as the length of the RFC822 5843 message string 5844 5845 WARNING: As row id's are mapped to email sequence numbers, 5846 make sure your imap client web2py app does not delete messages 5847 during select or update actions, to prevent 5848 updating or deleting different messages. 5849 Sequence numbers change whenever the mailbox is updated. 5850 To avoid this sequence numbers issues, it is recommended the use 5851 of uid fields in query references (although the update and delete 5852 in separate actions rule still applies). 5853 5854 # This is the code recommended to start imap support 5855 # at the app's model: 5856 5857 imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl 5858 imapdb.define_tables() 5859 5860 Here is an (incomplete) list of possible imap commands: 5861 5862 # Count today's unseen messages 5863 # smaller than 6000 octets from the 5864 # inbox mailbox 5865 5866 q = imapdb.INBOX.seen == False 5867 q &= imapdb.INBOX.created == datetime.date.today() 5868 q &= imapdb.INBOX.size < 6000 5869 unread = imapdb(q).count() 5870 5871 # Fetch last query messages 5872 rows = imapdb(q).select() 5873 5874 # it is also possible to filter query select results with limitby and 5875 # sequences of mailbox fields 5876 5877 set.select(<fields sequence>, limitby=(<int>, <int>)) 5878 5879 # Mark last query messages as seen 5880 messages = [row.uid for row in rows] 5881 seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) 5882 5883 # Delete messages in the imap database that have mails from mr. Gumby 5884 5885 deleted = 0 5886 for mailbox in imapdb.tables 5887 deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() 5888 5889 # It is possible also to mark messages for deletion instead of ereasing them 5890 # directly with set.update(deleted=True) 5891 5892 5893 # This object give access 5894 # to the adapter auto mailbox 5895 # mapped names (which native 5896 # mailbox has what table name) 5897 5898 imapdb.mailboxes <dict> # tablename, server native name pairs 5899 5900 # To retrieve a table native mailbox name use: 5901 imapdb.<table>.mailbox 5902 5903 ### New features v2.4.1: 5904 5905 # Declare mailboxes statically with tablename, name pairs 5906 # This avoids the extra server names retrieval 5907 5908 imapdb.define_tables({"inbox": "INBOX"}) 5909 5910 # Selects without content/attachments/email columns will only 5911 # fetch header and flags 5912 5913 imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) 5914 """ 5915 5916 types = { 5917 'string': str, 5918 'text': str, 5919 'date': datetime.date, 5920 'datetime': datetime.datetime, 5921 'id': long, 5922 'boolean': bool, 5923 'integer': int, 5924 'bigint': long, 5925 'blob': str, 5926 'list:string': str, 5927 } 5928 5929 dbengine = 'imap' 5930 5931 REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$') 5932
5933 - def __init__(self, 5934 db, 5935 uri, 5936 pool_size=0, 5937 folder=None, 5938 db_codec ='UTF-8', 5939 credential_decoder=IDENTITY, 5940 driver_args={}, 5941 adapter_args={}, 5942 do_connect=True, 5943 after_connection=None):
5944 5945 # db uri: user@example.com:password@imap.server.com:123 5946 # TODO: max size adapter argument for preventing large mail transfers 5947 5948 self.db = db 5949 self.uri = uri 5950 if do_connect: self.find_driver(adapter_args) 5951 self.pool_size=pool_size 5952 self.folder = folder 5953 self.db_codec = db_codec 5954 self._after_connection = after_connection 5955 self.credential_decoder = credential_decoder 5956 self.driver_args = driver_args 5957 self.adapter_args = adapter_args 5958 self.mailbox_size = None 5959 self.static_names = None 5960 self.charset = sys.getfilesystemencoding() 5961 # imap class 5962 self.imap4 = None 5963 uri = uri.split("://")[1] 5964 5965 """ MESSAGE is an identifier for sequence number""" 5966 5967 self.flags = ['\\Deleted', '\\Draft', '\\Flagged', 5968 '\\Recent', '\\Seen', '\\Answered'] 5969 self.search_fields = { 5970 'id': 'MESSAGE', 'created': 'DATE', 5971 'uid': 'UID', 'sender': 'FROM', 5972 'to': 'TO', 'cc': 'CC', 5973 'bcc': 'BCC', 'content': 'TEXT', 5974 'size': 'SIZE', 'deleted': '\\Deleted', 5975 'draft': '\\Draft', 'flagged': '\\Flagged', 5976 'recent': '\\Recent', 'seen': '\\Seen', 5977 'subject': 'SUBJECT', 'answered': '\\Answered', 5978 'mime': None, 'email': None, 5979 'attachments': None 5980 } 5981 5982 db['_lastsql'] = '' 5983 5984 m = self.REGEX_URI.match(uri) 5985 user = m.group('user') 5986 password = m.group('password') 5987 host = m.group('host') 5988 port = int(m.group('port')) 5989 over_ssl = False 5990 if port==993: 5991 over_ssl = True 5992 5993 driver_args.update(host=host,port=port, password=password, user=user) 5994 def connector(driver_args=driver_args): 5995 # it is assumed sucessful authentication alLways 5996 # TODO: support direct connection and login tests 5997 if over_ssl: 5998 self.imap4 = self.driver.IMAP4_SSL 5999 else: 6000 self.imap4 = self.driver.IMAP4 6001 connection = self.imap4(driver_args["host"], driver_args["port"]) 6002 data = connection.login(driver_args["user"], driver_args["password"]) 6003 6004 # static mailbox list 6005 connection.mailbox_names = None 6006 6007 # dummy cursor function 6008 connection.cursor = lambda : True 6009 6010 return connection
6011 6012 self.db.define_tables = self.define_tables 6013 self.connector = connector 6014 if do_connect: self.reconnect()
6015
6016 - def reconnect(self, f=None, cursor=True):
6017 """ 6018 IMAP4 Pool connection method 6019 6020 imap connection lacks of self cursor command. 6021 A custom command should be provided as a replacement 6022 for connection pooling to prevent uncaught remote session 6023 closing 6024 6025 """ 6026 if getattr(self,'connection',None) != None: 6027 return 6028 if f is None: 6029 f = self.connector 6030 6031 if not self.pool_size: 6032 self.connection = f() 6033 self.cursor = cursor and self.connection.cursor() 6034 else: 6035 POOLS = ConnectionPool.POOLS 6036 uri = self.uri 6037 while True: 6038 GLOBAL_LOCKER.acquire() 6039 if not uri in POOLS: 6040 POOLS[uri] = [] 6041 if POOLS[uri]: 6042 self.connection = POOLS[uri].pop() 6043 GLOBAL_LOCKER.release() 6044 self.cursor = cursor and self.connection.cursor() 6045 if self.cursor and self.check_active_connection: 6046 try: 6047 # check if connection is alive or close it 6048 result, data = self.connection.list() 6049 except: 6050 # Possible connection reset error 6051 # TODO: read exception class 6052 self.connection = f() 6053 break 6054 else: 6055 GLOBAL_LOCKER.release() 6056 self.connection = f() 6057 self.cursor = cursor and self.connection.cursor() 6058 break 6059 self.after_connection_hook()
6060
6061 - def get_last_message(self, tablename):
6062 last_message = None 6063 # request mailbox list to the server 6064 # if needed 6065 if not isinstance(self.connection.mailbox_names, dict): 6066 self.get_mailboxes() 6067 try: 6068 result = self.connection.select(self.connection.mailbox_names[tablename]) 6069 last_message = int(result[1][0]) 6070 except (IndexError, ValueError, TypeError, KeyError): 6071 e = sys.exc_info()[1] 6072 LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e)) 6073 return last_message
6074
6075 - def get_uid_bounds(self, tablename):
6076 if not isinstance(self.connection.mailbox_names, dict): 6077 self.get_mailboxes() 6078 # fetch first and last messages 6079 # return (first, last) messages uid's 6080 last_message = self.get_last_message(tablename) 6081 result, data = self.connection.uid("search", None, "(ALL)") 6082 uid_list = data[0].strip().split() 6083 if len(uid_list) <= 0: 6084 return None 6085 else: 6086 return (uid_list[0], uid_list[-1])
6087
6088 - def convert_date(self, date, add=None):
6089 if add is None: 6090 add = datetime.timedelta() 6091 """ Convert a date object to a string 6092 with d-Mon-Y style for IMAP or the inverse 6093 case 6094 6095 add <timedelta> adds to the date object 6096 """ 6097 months = [None, "JAN","FEB","MAR","APR","MAY","JUN", 6098 "JUL", "AUG","SEP","OCT","NOV","DEC"] 6099 if isinstance(date, basestring): 6100 # Prevent unexpected date response format 6101 try: 6102 dayname, datestring = date.split(",") 6103 date_list = datestring.strip().split() 6104 year = int(date_list[2]) 6105 month = months.index(date_list[1].upper()) 6106 day = int(date_list[0]) 6107 hms = map(int, date_list[3].split(":")) 6108 return datetime.datetime(year, month, day, 6109 hms[0], hms[1], hms[2]) + add 6110 except (ValueError, AttributeError, IndexError), e: 6111 LOGGER.error("Could not parse date text: %s. %s" % 6112 (date, e)) 6113 return None 6114 elif isinstance(date, (datetime.datetime, datetime.date)): 6115 return (date + add).strftime("%d-%b-%Y") 6116 else: 6117 return None
6118 6119 @staticmethod
6120 - def header_represent(f, r):
6121 from email.header import decode_header 6122 text, encoding = decode_header(f)[0] 6123 if encoding: 6124 text = text.decode(encoding).encode('utf-8') 6125 return text
6126
6127 - def encode_text(self, text, charset, errors="replace"):
6128 """ convert text for mail to unicode""" 6129 if text is None: 6130 text = "" 6131 else: 6132 if isinstance(text, str): 6133 if charset is None: 6134 text = unicode(text, "utf-8", errors) 6135 else: 6136 text = unicode(text, charset, errors) 6137 else: 6138 raise Exception("Unsupported mail text type %s" % type(text)) 6139 return text.encode("utf-8")
6140
6141 - def get_charset(self, message):
6142 charset = message.get_content_charset() 6143 return charset
6144
6145 - def get_mailboxes(self):
6146 """ Query the mail database for mailbox names """ 6147 if self.static_names: 6148 # statically defined mailbox names 6149 self.connection.mailbox_names = self.static_names 6150 return self.static_names.keys() 6151 6152 mailboxes_list = self.connection.list() 6153 self.connection.mailbox_names = dict() 6154 mailboxes = list() 6155 x = 0 6156 for item in mailboxes_list[1]: 6157 x = x + 1 6158 item = item.strip() 6159 if not "NOSELECT" in item.upper(): 6160 sub_items = item.split("\"") 6161 sub_items = [sub_item for sub_item in sub_items \ 6162 if len(sub_item.strip()) > 0] 6163 # mailbox = sub_items[len(sub_items) -1] 6164 mailbox = sub_items[-1] 6165 # remove unwanted characters and store original names 6166 # Don't allow leading non alphabetic characters 6167 mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) 6168 mailboxes.append(mailbox_name) 6169 self.connection.mailbox_names[mailbox_name] = mailbox 6170 6171 return mailboxes
6172
6173 - def get_query_mailbox(self, query):
6174 nofield = True 6175 tablename = None 6176 attr = query 6177 while nofield: 6178 if hasattr(attr, "first"): 6179 attr = attr.first 6180 if isinstance(attr, Field): 6181 return attr.tablename 6182 elif isinstance(attr, Query): 6183 pass 6184 else: 6185 return None 6186 else: 6187 return None 6188 return tablename
6189
6190 - def is_flag(self, flag):
6191 if self.search_fields.get(flag, None) in self.flags: 6192 return True 6193 else: 6194 return False
6195
6196 - def define_tables(self, mailbox_names=None):
6197 """ 6198 Auto create common IMAP fileds 6199 6200 This function creates fields definitions "statically" 6201 meaning that custom fields as in other adapters should 6202 not be supported and definitions handled on a service/mode 6203 basis (local syntax for Gmail(r), Ymail(r) 6204 6205 Returns a dictionary with tablename, server native mailbox name 6206 pairs. 6207 """ 6208 if mailbox_names: 6209 # optional statically declared mailboxes 6210 self.static_names = mailbox_names 6211 else: 6212 self.static_names = None 6213 if not isinstance(self.connection.mailbox_names, dict): 6214 self.get_mailboxes() 6215 6216 names = self.connection.mailbox_names.keys() 6217 6218 for name in names: 6219 self.db.define_table("%s" % name, 6220 Field("uid", "string", writable=False), 6221 Field("answered", "boolean"), 6222 Field("created", "datetime", writable=False), 6223 Field("content", "list:string", writable=False), 6224 Field("to", "string", writable=False), 6225 Field("cc", "string", writable=False), 6226 Field("bcc", "string", writable=False), 6227 Field("size", "integer", writable=False), 6228 Field("deleted", "boolean"), 6229 Field("draft", "boolean"), 6230 Field("flagged", "boolean"), 6231 Field("sender", "string", writable=False), 6232 Field("recent", "boolean", writable=False), 6233 Field("seen", "boolean"), 6234 Field("subject", "string", writable=False), 6235 Field("mime", "string", writable=False), 6236 Field("email", "string", writable=False, readable=False), 6237 Field("attachments", list, writable=False, readable=False), 6238 Field("encoding", writable=False) 6239 ) 6240 6241 # Set a special _mailbox attribute for storing 6242 # native mailbox names 6243 self.db[name].mailbox = \ 6244 self.connection.mailbox_names[name] 6245 6246 # decode quoted printable 6247 self.db[name].to.represent = self.db[name].cc.represent = \ 6248 self.db[name].bcc.represent = self.db[name].sender.represent = \ 6249 self.db[name].subject.represent = self.header_represent 6250 6251 # Set the db instance mailbox collections 6252 self.db.mailboxes = self.connection.mailbox_names 6253 return self.db.mailboxes
6254
6255 - def create_table(self, *args, **kwargs):
6256 # not implemented 6257 # but required by DAL 6258 pass
6259
6260 - def _select(self, query, fields, attributes):
6261 if use_common_filters(query): 6262 query = self.common_filter(query, [self.get_query_mailbox(query),]) 6263 return str(query)
6264
6265 - def select(self, query, fields, attributes):
6266 """ Search and Fetch records and return web2py rows 6267 """ 6268 # move this statement elsewhere (upper-level) 6269 if use_common_filters(query): 6270 query = self.common_filter(query, [self.get_query_mailbox(query),]) 6271 6272 import email 6273 # get records from imap server with search + fetch 6274 # convert results to a dictionary 6275 tablename = None 6276 fetch_results = list() 6277 6278 if isinstance(query, Query): 6279 tablename = self.get_table(query) 6280 mailbox = self.connection.mailbox_names.get(tablename, None) 6281 if mailbox is None: 6282 raise ValueError("Mailbox name not found: %s" % mailbox) 6283 else: 6284 # select with readonly 6285 result, selected = self.connection.select(mailbox, True) 6286 if result != "OK": 6287 raise Exception("IMAP error: %s" % selected) 6288 self.mailbox_size = int(selected[0]) 6289 search_query = "(%s)" % str(query).strip() 6290 search_result = self.connection.uid("search", None, search_query) 6291 # Normal IMAP response OK is assumed (change this) 6292 if search_result[0] == "OK": 6293 # For "light" remote server responses just get the first 6294 # ten records (change for non-experimental implementation) 6295 # However, light responses are not guaranteed with this 6296 # approach, just fewer messages. 6297 limitby = attributes.get('limitby', None) 6298 messages_set = search_result[1][0].split() 6299 # descending order 6300 messages_set.reverse() 6301 if limitby is not None: 6302 # TODO: orderby, asc/desc, limitby from complete message set 6303 messages_set = messages_set[int(limitby[0]):int(limitby[1])] 6304 6305 # keep the requests small for header/flags 6306 if any([(field.name in ["content", "size", 6307 "attachments", "email"]) for 6308 field in fields]): 6309 imap_fields = "(RFC822 FLAGS)" 6310 else: 6311 imap_fields = "(RFC822.HEADER FLAGS)" 6312 6313 if len(messages_set) > 0: 6314 # create fetch results object list 6315 # fetch each remote message and store it in memmory 6316 # (change to multi-fetch command syntax for faster 6317 # transactions) 6318 for uid in messages_set: 6319 # fetch the RFC822 message body 6320 typ, data = self.connection.uid("fetch", uid, imap_fields) 6321 if typ == "OK": 6322 fr = {"message": int(data[0][0].split()[0]), 6323 "uid": long(uid), 6324 "email": email.message_from_string(data[0][1]), 6325 "raw_message": data[0][1]} 6326 fr["multipart"] = fr["email"].is_multipart() 6327 # fetch flags for the message 6328 fr["flags"] = self.driver.ParseFlags(data[1]) 6329 fetch_results.append(fr) 6330 else: 6331 # error retrieving the message body 6332 raise Exception("IMAP error retrieving the body: %s" % data) 6333 else: 6334 raise Exception("IMAP search error: %s" % search_result[1]) 6335 elif isinstance(query, (Expression, basestring)): 6336 raise NotImplementedError() 6337 else: 6338 raise TypeError("Unexpected query type") 6339 6340 imapqry_dict = {} 6341 imapfields_dict = {} 6342 6343 if len(fields) == 1 and isinstance(fields[0], SQLALL): 6344 allfields = True 6345 elif len(fields) == 0: 6346 allfields = True 6347 else: 6348 allfields = False 6349 if allfields: 6350 colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()] 6351 else: 6352 colnames = ["%s.%s" % (tablename, field.name) for field in fields] 6353 6354 for k in colnames: 6355 imapfields_dict[k] = k 6356 6357 imapqry_list = list() 6358 imapqry_array = list() 6359 for fr in fetch_results: 6360 attachments = [] 6361 content = [] 6362 size = 0 6363 n = int(fr["message"]) 6364 item_dict = dict() 6365 message = fr["email"] 6366 uid = fr["uid"] 6367 charset = self.get_charset(message) 6368 flags = fr["flags"] 6369 raw_message = fr["raw_message"] 6370 # Return messages data mapping static fields 6371 # and fetched results. Mapping should be made 6372 # outside the select function (with auxiliary 6373 # instance methods) 6374 6375 # pending: search flags states trough the email message 6376 # instances for correct output 6377 6378 # preserve subject encoding (ASCII/quoted printable) 6379 6380 if "%s.id" % tablename in colnames: 6381 item_dict["%s.id" % tablename] = n 6382 if "%s.created" % tablename in colnames: 6383 item_dict["%s.created" % tablename] = self.convert_date(message["Date"]) 6384 if "%s.uid" % tablename in colnames: 6385 item_dict["%s.uid" % tablename] = uid 6386 if "%s.sender" % tablename in colnames: 6387 # If there is no encoding found in the message header 6388 # force utf-8 replacing characters (change this to 6389 # module's defaults). Applies to .sender, .to, .cc and .bcc fields 6390 item_dict["%s.sender" % tablename] = message["From"] 6391 if "%s.to" % tablename in colnames: 6392 item_dict["%s.to" % tablename] = message["To"] 6393 if "%s.cc" % tablename in colnames: 6394 if "Cc" in message.keys(): 6395 item_dict["%s.cc" % tablename] = message["Cc"] 6396 else: 6397 item_dict["%s.cc" % tablename] = "" 6398 if "%s.bcc" % tablename in colnames: 6399 if "Bcc" in message.keys(): 6400 item_dict["%s.bcc" % tablename] = message["Bcc"] 6401 else: 6402 item_dict["%s.bcc" % tablename] = "" 6403 if "%s.deleted" % tablename in colnames: 6404 item_dict["%s.deleted" % tablename] = "\\Deleted" in flags 6405 if "%s.draft" % tablename in colnames: 6406 item_dict["%s.draft" % tablename] = "\\Draft" in flags 6407 if "%s.flagged" % tablename in colnames: 6408 item_dict["%s.flagged" % tablename] = "\\Flagged" in flags 6409 if "%s.recent" % tablename in colnames: 6410 item_dict["%s.recent" % tablename] = "\\Recent" in flags 6411 if "%s.seen" % tablename in colnames: 6412 item_dict["%s.seen" % tablename] = "\\Seen" in flags 6413 if "%s.subject" % tablename in colnames: 6414 item_dict["%s.subject" % tablename] = message["Subject"] 6415 if "%s.answered" % tablename in colnames: 6416 item_dict["%s.answered" % tablename] = "\\Answered" in flags 6417 if "%s.mime" % tablename in colnames: 6418 item_dict["%s.mime" % tablename] = message.get_content_type() 6419 if "%s.encoding" % tablename in colnames: 6420 item_dict["%s.encoding" % tablename] = charset 6421 6422 # Here goes the whole RFC822 body as an email instance 6423 # for controller side custom processing 6424 # The message is stored as a raw string 6425 # >> email.message_from_string(raw string) 6426 # returns a Message object for enhanced object processing 6427 if "%s.email" % tablename in colnames: 6428 # WARNING: no encoding performed (raw message) 6429 item_dict["%s.email" % tablename] = raw_message 6430 6431 # Size measure as suggested in a Velocity Reviews post 6432 # by Tim Williams: "how to get size of email attachment" 6433 # Note: len() and server RFC822.SIZE reports doesn't match 6434 # To retrieve the server size for representation would add a new 6435 # fetch transaction to the process 6436 for part in message.walk(): 6437 maintype = part.get_content_maintype() 6438 if ("%s.attachments" % tablename in colnames) or \ 6439 ("%s.content" % tablename in colnames): 6440 if "%s.attachments" % tablename in colnames: 6441 if not ("text" in maintype): 6442 payload = part.get_payload(decode=True) 6443 if payload: 6444 attachment = { 6445 "payload": payload, 6446 "filename": part.get_filename(), 6447 "encoding": part.get_content_charset(), 6448 "mime": part.get_content_type(), 6449 "disposition": part["Content-Disposition"]} 6450 attachments.append(attachment) 6451 if "%s.content" % tablename in colnames: 6452 payload = part.get_payload(decode=True) 6453 part_charset = self.get_charset(part) 6454 if "text" in maintype: 6455 if payload: 6456 content.append(self.encode_text(payload, part_charset)) 6457 if "%s.size" % tablename in colnames: 6458 if part is not None: 6459 size += len(str(part)) 6460 item_dict["%s.content" % tablename] = content 6461 item_dict["%s.attachments" % tablename] = attachments 6462 item_dict["%s.size" % tablename] = size 6463 imapqry_list.append(item_dict) 6464 6465 # extra object mapping for the sake of rows object 6466 # creation (sends an array or lists) 6467 for item_dict in imapqry_list: 6468 imapqry_array_item = list() 6469 for fieldname in colnames: 6470 imapqry_array_item.append(item_dict[fieldname]) 6471 imapqry_array.append(imapqry_array_item) 6472 6473 # parse result and return a rows object 6474 colnames = colnames 6475 processor = attributes.get('processor',self.parse) 6476 return processor(imapqry_array, fields, colnames)
6477
6478 - def _update(self, tablename, query, fields, commit=False):
6479 # TODO: the adapter should implement an .expand method 6480 commands = list() 6481 if use_common_filters(query): 6482 query = self.common_filter(query, [tablename,]) 6483 mark = [] 6484 unmark = [] 6485 if query: 6486 for item in fields: 6487 field = item[0] 6488 name = field.name 6489 value = item[1] 6490 if self.is_flag(name): 6491 flag = self.search_fields[name] 6492 if (value is not None) and (flag != "\\Recent"): 6493 if value: 6494 mark.append(flag) 6495 else: 6496 unmark.append(flag) 6497 result, data = self.connection.select( 6498 self.connection.mailbox_names[tablename]) 6499 string_query = "(%s)" % query 6500 result, data = self.connection.search(None, string_query) 6501 store_list = [item.strip() for item in data[0].split() 6502 if item.strip().isdigit()] 6503 # build commands for marked flags 6504 for number in store_list: 6505 result = None 6506 if len(mark) > 0: 6507 commands.append((number, "+FLAGS", "(%s)" % " ".join(mark))) 6508 if len(unmark) > 0: 6509 commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark))) 6510 return commands
6511
6512 - def update(self, tablename, query, fields):
6513 rowcount = 0 6514 commands = self._update(tablename, query, fields) 6515 for command in commands: 6516 result, data = self.connection.store(*command) 6517 if result == "OK": 6518 rowcount += 1 6519 else: 6520 raise Exception("IMAP storing error: %s" % data) 6521 return rowcount
6522
6523 - def _count(self, query, distinct=None):
6524 raise NotImplementedError()
6525
6526 - def count(self,query,distinct=None):
6527 counter = 0 6528 tablename = self.get_query_mailbox(query) 6529 if query and tablename is not None: 6530 if use_common_filters(query): 6531 query = self.common_filter(query, [tablename,]) 6532 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6533 string_query = "(%s)" % query 6534 result, data = self.connection.search(None, string_query) 6535 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6536 counter = len(store_list) 6537 return counter
6538
6539 - def delete(self, tablename, query):
6540 counter = 0 6541 if query: 6542 if use_common_filters(query): 6543 query = self.common_filter(query, [tablename,]) 6544 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6545 string_query = "(%s)" % query 6546 result, data = self.connection.search(None, string_query) 6547 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6548 for number in store_list: 6549 result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)") 6550 if result == "OK": 6551 counter += 1 6552 else: 6553 raise Exception("IMAP store error: %s" % data) 6554 if counter > 0: 6555 result, data = self.connection.expunge() 6556 return counter
6557
6558 - def BELONGS(self, first, second):
6559 result = None 6560 name = self.search_fields[first.name] 6561 if name == "MESSAGE": 6562 values = [str(val) for val in second if str(val).isdigit()] 6563 result = "%s" % ",".join(values).strip() 6564 6565 elif name == "UID": 6566 values = [str(val) for val in second if str(val).isdigit()] 6567 result = "UID %s" % ",".join(values).strip() 6568 6569 else: 6570 raise Exception("Operation not supported") 6571 # result = "(%s %s)" % (self.expand(first), self.expand(second)) 6572 return result
6573
6574 - def CONTAINS(self, first, second, case_sensitive=False):
6575 # silently ignore, only case sensitive 6576 result = None 6577 name = self.search_fields[first.name] 6578 6579 if name in ("FROM", "TO", "SUBJECT", "TEXT"): 6580 result = "%s \"%s\"" % (name, self.expand(second)) 6581 else: 6582 if first.name in ("cc", "bcc"): 6583 result = "%s \"%s\"" % (first.name.upper(), self.expand(second)) 6584 elif first.name == "mime": 6585 result = "HEADER Content-Type \"%s\"" % self.expand(second) 6586 else: 6587 raise Exception("Operation not supported") 6588 return result
6589
6590 - def GT(self, first, second):
6591 result = None 6592 name = self.search_fields[first.name] 6593 if name == "MESSAGE": 6594 last_message = self.get_last_message(first.tablename) 6595 result = "%d:%d" % (int(self.expand(second)) + 1, last_message) 6596 elif name == "UID": 6597 # GT and LT may not return 6598 # expected sets depending on 6599 # the uid format implemented 6600 try: 6601 pedestal, threshold = self.get_uid_bounds(first.tablename) 6602 except TypeError: 6603 e = sys.exc_info()[1] 6604 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6605 return "" 6606 try: 6607 lower_limit = int(self.expand(second)) + 1 6608 except (ValueError, TypeError): 6609 e = sys.exc_info()[1] 6610 raise Exception("Operation not supported (non integer UID)") 6611 result = "UID %s:%s" % (lower_limit, threshold) 6612 elif name == "DATE": 6613 result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6614 elif name == "SIZE": 6615 result = "LARGER %s" % self.expand(second) 6616 else: 6617 raise Exception("Operation not supported") 6618 return result
6619
6620 - def GE(self, first, second):
6621 result = None 6622 name = self.search_fields[first.name] 6623 if name == "MESSAGE": 6624 last_message = self.get_last_message(first.tablename) 6625 result = "%s:%s" % (self.expand(second), last_message) 6626 elif name == "UID": 6627 # GT and LT may not return 6628 # expected sets depending on 6629 # the uid format implemented 6630 try: 6631 pedestal, threshold = self.get_uid_bounds(first.tablename) 6632 except TypeError: 6633 e = sys.exc_info()[1] 6634 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6635 return "" 6636 lower_limit = self.expand(second) 6637 result = "UID %s:%s" % (lower_limit, threshold) 6638 elif name == "DATE": 6639 result = "SINCE %s" % self.convert_date(second) 6640 else: 6641 raise Exception("Operation not supported") 6642 return result
6643
6644 - def LT(self, first, second):
6645 result = None 6646 name = self.search_fields[first.name] 6647 if name == "MESSAGE": 6648 result = "%s:%s" % (1, int(self.expand(second)) - 1) 6649 elif name == "UID": 6650 try: 6651 pedestal, threshold = self.get_uid_bounds(first.tablename) 6652 except TypeError: 6653 e = sys.exc_info()[1] 6654 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6655 return "" 6656 try: 6657 upper_limit = int(self.expand(second)) - 1 6658 except (ValueError, TypeError): 6659 e = sys.exc_info()[1] 6660 raise Exception("Operation not supported (non integer UID)") 6661 result = "UID %s:%s" % (pedestal, upper_limit) 6662 elif name == "DATE": 6663 result = "BEFORE %s" % self.convert_date(second) 6664 elif name == "SIZE": 6665 result = "SMALLER %s" % self.expand(second) 6666 else: 6667 raise Exception("Operation not supported") 6668 return result
6669
6670 - def LE(self, first, second):
6671 result = None 6672 name = self.search_fields[first.name] 6673 if name == "MESSAGE": 6674 result = "%s:%s" % (1, self.expand(second)) 6675 elif name == "UID": 6676 try: 6677 pedestal, threshold = self.get_uid_bounds(first.tablename) 6678 except TypeError: 6679 e = sys.exc_info()[1] 6680 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6681 return "" 6682 upper_limit = int(self.expand(second)) 6683 result = "UID %s:%s" % (pedestal, upper_limit) 6684 elif name == "DATE": 6685 result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6686 else: 6687 raise Exception("Operation not supported") 6688 return result
6689
6690 - def NE(self, first, second=None):
6691 if (second is None) and isinstance(first, Field): 6692 # All records special table query 6693 if first.type == "id": 6694 return self.GE(first, 1) 6695 result = self.NOT(self.EQ(first, second)) 6696 result = result.replace("NOT NOT", "").strip() 6697 return result
6698
6699 - def EQ(self,first,second):
6700 name = self.search_fields[first.name] 6701 result = None 6702 if name is not None: 6703 if name == "MESSAGE": 6704 # query by message sequence number 6705 result = "%s" % self.expand(second) 6706 elif name == "UID": 6707 result = "UID %s" % self.expand(second) 6708 elif name == "DATE": 6709 result = "ON %s" % self.convert_date(second) 6710 6711 elif name in self.flags: 6712 if second: 6713 result = "%s" % (name.upper()[1:]) 6714 else: 6715 result = "NOT %s" % (name.upper()[1:]) 6716 else: 6717 raise Exception("Operation not supported") 6718 else: 6719 raise Exception("Operation not supported") 6720 return result
6721
6722 - def AND(self, first, second):
6723 result = "%s %s" % (self.expand(first), self.expand(second)) 6724 return result
6725
6726 - def OR(self, first, second):
6727 result = "OR %s %s" % (self.expand(first), self.expand(second)) 6728 return "%s" % result.replace("OR OR", "OR")
6729
6730 - def NOT(self, first):
6731 result = "NOT %s" % self.expand(first) 6732 return result
6733 6734 ######################################################################## 6735 # end of adapters 6736 ######################################################################## 6737 6738 ADAPTERS = { 6739 'sqlite': SQLiteAdapter, 6740 'spatialite': SpatiaLiteAdapter, 6741 'sqlite:memory': SQLiteAdapter, 6742 'spatialite:memory': SpatiaLiteAdapter, 6743 'mysql': MySQLAdapter, 6744 'postgres': PostgreSQLAdapter, 6745 'postgres:psycopg2': PostgreSQLAdapter, 6746 'postgres:pg8000': PostgreSQLAdapter, 6747 'postgres2:psycopg2': NewPostgreSQLAdapter, 6748 'postgres2:pg8000': NewPostgreSQLAdapter, 6749 'oracle': OracleAdapter, 6750 'mssql': MSSQLAdapter, 6751 'mssql2': MSSQL2Adapter, 6752 'mssql3': MSSQL3Adapter, 6753 'vertica': VerticaAdapter, 6754 'sybase': SybaseAdapter, 6755 'db2': DB2Adapter, 6756 'teradata': TeradataAdapter, 6757 'informix': InformixAdapter, 6758 'informix-se': InformixSEAdapter, 6759 'firebird': FireBirdAdapter, 6760 'firebird_embedded': FireBirdAdapter, 6761 'ingres': IngresAdapter, 6762 'ingresu': IngresUnicodeAdapter, 6763 'sapdb': SAPDBAdapter, 6764 'cubrid': CubridAdapter, 6765 'jdbc:sqlite': JDBCSQLiteAdapter, 6766 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 6767 'jdbc:postgres': JDBCPostgreSQLAdapter, 6768 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 6769 'google:datastore': GoogleDatastoreAdapter, 6770 'google:sql': GoogleSQLAdapter, 6771 'couchdb': CouchDBAdapter, 6772 'mongodb': MongoDBAdapter, 6773 'imap': IMAPAdapter 6774 }
6775 6776 -def sqlhtml_validators(field):
6777 """ 6778 Field type validation, using web2py's validators mechanism. 6779 6780 makes sure the content of a field is in line with the declared 6781 fieldtype 6782 """ 6783 db = field.db 6784 if not have_validators: 6785 return [] 6786 field_type, field_length = field.type, field.length 6787 if isinstance(field_type, SQLCustomType): 6788 if hasattr(field_type, 'validator'): 6789 return field_type.validator 6790 else: 6791 field_type = field_type.type 6792 elif not isinstance(field_type,str): 6793 return [] 6794 requires=[] 6795 def ff(r,id): 6796 row=r(id) 6797 if not row: 6798 return id 6799 elif hasattr(r, '_format') and isinstance(r._format,str): 6800 return r._format % row 6801 elif hasattr(r, '_format') and callable(r._format): 6802 return r._format(row) 6803 else: 6804 return id
6805 if field_type in (('string', 'text', 'password')): 6806 requires.append(validators.IS_LENGTH(field_length)) 6807 elif field_type == 'json': 6808 requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json))) 6809 elif field_type == 'double' or field_type == 'float': 6810 requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100)) 6811 elif field_type in ('integer','bigint'): 6812 requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100)) 6813 elif field_type.startswith('decimal'): 6814 requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10)) 6815 elif field_type == 'date': 6816 requires.append(validators.IS_DATE()) 6817 elif field_type == 'time': 6818 requires.append(validators.IS_TIME()) 6819 elif field_type == 'datetime': 6820 requires.append(validators.IS_DATETIME()) 6821 elif db and field_type.startswith('reference') and \ 6822 field_type.find('.') < 0 and \ 6823 field_type[10:] in db.tables: 6824 referenced = db[field_type[10:]] 6825 def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id) 6826 field.represent = field.represent or repr_ref 6827 if hasattr(referenced, '_format') and referenced._format: 6828 requires = validators.IS_IN_DB(db,referenced._id, 6829 referenced._format) 6830 if field.unique: 6831 requires._and = validators.IS_NOT_IN_DB(db,field) 6832 if field.tablename == field_type[10:]: 6833 return validators.IS_EMPTY_OR(requires) 6834 return requires 6835 elif db and field_type.startswith('list:reference') and \ 6836 field_type.find('.') < 0 and \ 6837 field_type[15:] in db.tables: 6838 referenced = db[field_type[15:]] 6839 def list_ref_repr(ids, row=None, r=referenced, f=ff): 6840 if not ids: 6841 return None 6842 refs = None 6843 db, id = r._db, r._id 6844 if isinstance(db._adapter, GoogleDatastoreAdapter): 6845 def count(values): return db(id.belongs(values)).select(id) 6846 rx = range(0, len(ids), 30) 6847 refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx]) 6848 else: 6849 refs = db(id.belongs(ids)).select(id) 6850 return (refs and ', '.join(str(f(r,x.id)) for x in refs) or '') 6851 field.represent = field.represent or list_ref_repr 6852 if hasattr(referenced, '_format') and referenced._format: 6853 requires = validators.IS_IN_DB(db,referenced._id, 6854 referenced._format,multiple=True) 6855 else: 6856 requires = validators.IS_IN_DB(db,referenced._id, 6857 multiple=True) 6858 if field.unique: 6859 requires._and = validators.IS_NOT_IN_DB(db,field) 6860 return requires 6861 elif field_type.startswith('list:'): 6862 def repr_list(values,row=None): return', '.join(str(v) for v in (values or [])) 6863 field.represent = field.represent or repr_list 6864 if field.unique: 6865 requires.insert(0,validators.IS_NOT_IN_DB(db,field)) 6866 sff = ['in', 'do', 'da', 'ti', 'de', 'bo'] 6867 if field.notnull and not field_type[:2] in sff: 6868 requires.insert(0, validators.IS_NOT_EMPTY()) 6869 elif not field.notnull and field_type[:2] in sff and requires: 6870 requires[-1] = validators.IS_EMPTY_OR(requires[-1]) 6871 return requires 6872
6873 6874 -def bar_escape(item):
6875 return str(item).replace('|', '||')
6876
6877 -def bar_encode(items):
6878 return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
6879
6880 -def bar_decode_integer(value):
6881 if not hasattr(value,'split') and hasattr(value,'read'): 6882 value = value.read() 6883 return [long(x) for x in value.split('|') if x.strip()]
6884
6885 -def bar_decode_string(value):
6886 return [x.replace('||', '|') for x in 6887 REGEX_UNPACK.split(value[1:-1]) if x.strip()]
6888
6889 6890 -class Row(object):
6891 6892 """ 6893 a dictionary that lets you do d['a'] as well as d.a 6894 this is only used to store a Row 6895 """ 6896 6897 __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs) 6898
6899 - def __getitem__(self, k):
6900 key=str(k) 6901 _extra = self.__dict__.get('_extra', None) 6902 if _extra is not None: 6903 v = _extra.get(key, DEFAULT) 6904 if v != DEFAULT: 6905 return v 6906 m = REGEX_TABLE_DOT_FIELD.match(key) 6907 if m: 6908 try: 6909 return ogetattr(self, m.group(1))[m.group(2)] 6910 except (KeyError,AttributeError,TypeError): 6911 key = m.group(2) 6912 return ogetattr(self, key)
6913 6914 __setitem__ = lambda self, key, value: setattr(self, str(key), value) 6915 6916 __delitem__ = object.__delattr__ 6917 6918 __copy__ = lambda self: Row(self) 6919 6920 __call__ = __getitem__ 6921 6922 get = lambda self, key, default=None: self.__dict__.get(key,default) 6923 6924 6925 has_key = __contains__ = lambda self, key: key in self.__dict__ 6926 6927 __nonzero__ = lambda self: len(self.__dict__)>0 6928 6929 update = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs) 6930 6931 keys = lambda self: self.__dict__.keys() 6932 6933 items = lambda self: self.__dict__.items() 6934 6935 values = lambda self: self.__dict__.values() 6936 6937 __iter__ = lambda self: self.__dict__.__iter__() 6938 6939 iteritems = lambda self: self.__dict__.iteritems() 6940 6941 __str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict() 6942 6943 __int__ = lambda self: object.__getattribute__(self,'id') 6944 6945 __long__ = lambda self: long(object.__getattribute__(self,'id')) 6946 6947
6948 - def __eq__(self,other):
6949 try: 6950 return self.as_dict() == other.as_dict() 6951 except AttributeError: 6952 return False
6953
6954 - def __ne__(self,other):
6955 return not (self == other)
6956
6957 - def __copy__(self):
6958 return Row(dict(self))
6959
6960 - def as_dict(self, datetime_to_str=False, custom_types=None):
6961 SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict] 6962 if isinstance(custom_types,(list,tuple,set)): 6963 SERIALIZABLE_TYPES += custom_types 6964 elif custom_types: 6965 SERIALIZABLE_TYPES.append(custom_types) 6966 d = dict(self) 6967 for k in copy.copy(d.keys()): 6968 v=d[k] 6969 if d[k] is None: 6970 continue 6971 elif isinstance(v,Row): 6972 d[k]=v.as_dict() 6973 elif isinstance(v,Reference): 6974 d[k]=long(v) 6975 elif isinstance(v,decimal.Decimal): 6976 d[k]=float(v) 6977 elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)): 6978 if datetime_to_str: 6979 d[k] = v.isoformat().replace('T',' ')[:19] 6980 elif not isinstance(v,tuple(SERIALIZABLE_TYPES)): 6981 del d[k] 6982 return d
6983
6984 - def as_xml(self, row_name="row", colnames=None, indent=' '):
6985 def f(row,field,indent=' '): 6986 if isinstance(row,Row): 6987 spc = indent+' \n' 6988 items = [f(row[x],x,indent+' ') for x in row] 6989 return '%s<%s>\n%s\n%s</%s>' % ( 6990 indent, 6991 field, 6992 spc.join(item for item in items if item), 6993 indent, 6994 field) 6995 elif not callable(row): 6996 if REGEX_ALPHANUMERIC.match(field): 6997 return '%s<%s>%s</%s>' % (indent,field,row,field) 6998 else: 6999 return '%s<extra name="%s">%s</extra>' % \ 7000 (indent,field,row) 7001 else: 7002 return None
7003 return f(self, row_name, indent=indent)
7004
7005 - def as_json(self, mode="object", default=None, colnames=None, 7006 serialize=True, **kwargs):
7007 """ 7008 serializes the row to a JSON object 7009 kwargs are passed to .as_dict method 7010 only "object" mode supported 7011 7012 serialize = False used by Rows.as_json 7013 TODO: return array mode with query column order 7014 7015 mode and colnames are not implemented 7016 """ 7017 7018 item = self.as_dict(**kwargs) 7019 if serialize: 7020 if have_serializers: 7021 return serializers.json(item, 7022 default=default or 7023 serializers.custom_json) 7024 elif simplejson: 7025 return simplejson.dumps(item) 7026 else: 7027 raise RuntimeError("missing simplejson") 7028 else: 7029 return item
7030
7031 7032 ################################################################################ 7033 # Everything below should be independent of the specifics of the database 7034 # and should work for RDBMs and some NoSQL databases 7035 ################################################################################ 7036 7037 -class SQLCallableList(list):
7038 - def __call__(self):
7039 return copy.copy(self)
7040
7041 -def smart_query(fields,text):
7042 if not isinstance(fields,(list,tuple)): 7043 fields = [fields] 7044 new_fields = [] 7045 for field in fields: 7046 if isinstance(field,Field): 7047 new_fields.append(field) 7048 elif isinstance(field,Table): 7049 for ofield in field: 7050 new_fields.append(ofield) 7051 else: 7052 raise RuntimeError("fields must be a list of fields") 7053 fields = new_fields 7054 field_map = {} 7055 for field in fields: 7056 n = field.name.lower() 7057 if not n in field_map: 7058 field_map[n] = field 7059 n = str(field).lower() 7060 if not n in field_map: 7061 field_map[n] = field 7062 constants = {} 7063 i = 0 7064 while True: 7065 m = REGEX_CONST_STRING.search(text) 7066 if not m: break 7067 text = text[:m.start()]+('#%i' % i)+text[m.end():] 7068 constants[str(i)] = m.group()[1:-1] 7069 i+=1 7070 text = re.sub('\s+',' ',text).lower() 7071 for a,b in [('&','and'), 7072 ('|','or'), 7073 ('~','not'), 7074 ('==','='), 7075 ('<','<'), 7076 ('>','>'), 7077 ('<=','<='), 7078 ('>=','>='), 7079 ('<>','!='), 7080 ('=<','<='), 7081 ('=>','>='), 7082 ('=','='), 7083 (' less or equal than ','<='), 7084 (' greater or equal than ','>='), 7085 (' equal or less than ','<='), 7086 (' equal or greater than ','>='), 7087 (' less or equal ','<='), 7088 (' greater or equal ','>='), 7089 (' equal or less ','<='), 7090 (' equal or greater ','>='), 7091 (' not equal to ','!='), 7092 (' not equal ','!='), 7093 (' equal to ','='), 7094 (' equal ','='), 7095 (' equals ','='), 7096 (' less than ','<'), 7097 (' greater than ','>'), 7098 (' starts with ','startswith'), 7099 (' ends with ','endswith'), 7100 (' not in ' , 'notbelongs'), 7101 (' in ' , 'belongs'), 7102 (' is ','=')]: 7103 if a[0]==' ': 7104 text = text.replace(' is'+a,' %s ' % b) 7105 text = text.replace(a,' %s ' % b) 7106 text = re.sub('\s+',' ',text).lower() 7107 text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text) 7108 query = field = neg = op = logic = None 7109 for item in text.split(): 7110 if field is None: 7111 if item == 'not': 7112 neg = True 7113 elif not neg and not logic and item in ('and','or'): 7114 logic = item 7115 elif item in field_map: 7116 field = field_map[item] 7117 else: 7118 raise RuntimeError("Invalid syntax") 7119 elif not field is None and op is None: 7120 op = item 7121 elif not op is None: 7122 if item.startswith('#'): 7123 if not item[1:] in constants: 7124 raise RuntimeError("Invalid syntax") 7125 value = constants[item[1:]] 7126 else: 7127 value = item 7128 if field.type in ('text', 'string', 'json'): 7129 if op == '=': op = 'like' 7130 if op == '=': new_query = field==value 7131 elif op == '<': new_query = field<value 7132 elif op == '>': new_query = field>value 7133 elif op == '<=': new_query = field<=value 7134 elif op == '>=': new_query = field>=value 7135 elif op == '!=': new_query = field!=value 7136 elif op == 'belongs': new_query = field.belongs(value.split(',')) 7137 elif op == 'notbelongs': new_query = ~field.belongs(value.split(',')) 7138 elif field.type in ('text', 'string', 'json'): 7139 if op == 'contains': new_query = field.contains(value) 7140 elif op == 'like': new_query = field.like(value) 7141 elif op == 'startswith': new_query = field.startswith(value) 7142 elif op == 'endswith': new_query = field.endswith(value) 7143 else: raise RuntimeError("Invalid operation") 7144 elif field._db._adapter.dbengine=='google:datastore' and \ 7145 field.type in ('list:integer', 'list:string', 'list:reference'): 7146 if op == 'contains': new_query = field.contains(value) 7147 else: raise RuntimeError("Invalid operation") 7148 else: raise RuntimeError("Invalid operation") 7149 if neg: new_query = ~new_query 7150 if query is None: 7151 query = new_query 7152 elif logic == 'and': 7153 query &= new_query 7154 elif logic == 'or': 7155 query |= new_query 7156 field = op = neg = logic = None 7157 return query
7158
7159 -class DAL(object):
7160 7161 """ 7162 an instance of this class represents a database connection 7163 7164 Example:: 7165 7166 db = DAL('sqlite://test.db') 7167 7168 or 7169 7170 db = DAL({"uri": ..., "items": ...}) # experimental 7171 7172 db.define_table('tablename', Field('fieldname1'), 7173 Field('fieldname2')) 7174 """ 7175
7176 - def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
7177 if not hasattr(THREAD_LOCAL,'db_instances'): 7178 THREAD_LOCAL.db_instances = {} 7179 if not hasattr(THREAD_LOCAL,'db_instances_zombie'): 7180 THREAD_LOCAL.db_instances_zombie = {} 7181 if uri == '<zombie>': 7182 db_uid = kwargs['db_uid'] # a zombie must have a db_uid! 7183 if db_uid in THREAD_LOCAL.db_instances: 7184 db_group = THREAD_LOCAL.db_instances[db_uid] 7185 db = db_group[-1] 7186 elif db_uid in THREAD_LOCAL.db_instances_zombie: 7187 db = THREAD_LOCAL.db_instances_zombie[db_uid] 7188 else: 7189 db = super(DAL, cls).__new__(cls) 7190 THREAD_LOCAL.db_instances_zombie[db_uid] = db 7191 else: 7192 db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest()) 7193 if db_uid in THREAD_LOCAL.db_instances_zombie: 7194 db = THREAD_LOCAL.db_instances_zombie[db_uid] 7195 del THREAD_LOCAL.db_instances_zombie[db_uid] 7196 else: 7197 db = super(DAL, cls).__new__(cls) 7198 db_group = THREAD_LOCAL.db_instances.get(db_uid,[]) 7199 db_group.append(db) 7200 THREAD_LOCAL.db_instances[db_uid] = db_group 7201 db._db_uid = db_uid 7202 return db
7203 7204 @staticmethod
7205 - def set_folder(folder):
7206 """ 7207 # ## this allows gluon to set a folder for this thread 7208 # ## <<<<<<<<< Should go away as new DAL replaces old sql.py 7209 """ 7210 BaseAdapter.set_folder(folder)
7211 7212 @staticmethod
7213 - def get_instances():
7214 """ 7215 Returns a dictionary with uri as key with timings and defined tables 7216 {'sqlite://storage.sqlite': { 7217 'dbstats': [(select auth_user.email from auth_user, 0.02009)], 7218 'dbtables': { 7219 'defined': ['auth_cas', 'auth_event', 'auth_group', 7220 'auth_membership', 'auth_permission', 'auth_user'], 7221 'lazy': '[]' 7222 } 7223 } 7224 } 7225 """ 7226 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 7227 infos = {} 7228 for db_uid, db_group in dbs: 7229 for db in db_group: 7230 if not db._uri: 7231 continue 7232 k = hide_password(db._uri) 7233 infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings], 7234 dbtables = {'defined': 7235 sorted(list(set(db.tables) - 7236 set(db._LAZY_TABLES.keys()))), 7237 'lazy': sorted(db._LAZY_TABLES.keys())} 7238 ) 7239 return infos
7240 7241 @staticmethod
7242 - def distributed_transaction_begin(*instances):
7243 if not instances: 7244 return 7245 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7246 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7247 instances = enumerate(instances) 7248 for (i, db) in instances: 7249 if not db._adapter.support_distributed_transaction(): 7250 raise SyntaxError( 7251 'distributed transaction not suported by %s' % db._dbname) 7252 for (i, db) in instances: 7253 db._adapter.distributed_transaction_begin(keys[i])
7254 7255 @staticmethod
7256 - def distributed_transaction_commit(*instances):
7257 if not instances: 7258 return 7259 instances = enumerate(instances) 7260 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7261 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7262 for (i, db) in instances: 7263 if not db._adapter.support_distributed_transaction(): 7264 raise SyntaxError( 7265 'distributed transaction not suported by %s' % db._dbanme) 7266 try: 7267 for (i, db) in instances: 7268 db._adapter.prepare(keys[i]) 7269 except: 7270 for (i, db) in instances: 7271 db._adapter.rollback_prepared(keys[i]) 7272 raise RuntimeError('failure to commit distributed transaction') 7273 else: 7274 for (i, db) in instances: 7275 db._adapter.commit_prepared(keys[i]) 7276 return
7277
7278 - def __init__(self, uri=DEFAULT_URI, 7279 pool_size=0, folder=None, 7280 db_codec='UTF-8', check_reserved=None, 7281 migrate=True, fake_migrate=False, 7282 migrate_enabled=True, fake_migrate_all=False, 7283 decode_credentials=False, driver_args=None, 7284 adapter_args=None, attempts=5, auto_import=False, 7285 bigint_id=False,debug=False,lazy_tables=False, 7286 db_uid=None, do_connect=True, after_connection=None):
7287 """ 7288 Creates a new Database Abstraction Layer instance. 7289 7290 Keyword arguments: 7291 7292 :uri: string that contains information for connecting to a database. 7293 (default: 'sqlite://dummy.db') 7294 7295 experimental: you can specify a dictionary as uri 7296 parameter i.e. with 7297 db = DAL({"uri": "sqlite://storage.sqlite", 7298 "items": {...}, ...}) 7299 7300 for an example of dict input you can check the output 7301 of the scaffolding db model with 7302 7303 db.as_dict() 7304 7305 Note that for compatibility with Python older than 7306 version 2.6.5 you should cast your dict input keys 7307 to str due to a syntax limitation on kwarg names. 7308 for proper DAL dictionary input you can use one of: 7309 7310 obj = serializers.cast_keys(dict, [encoding="utf-8"]) 7311 7312 or else (for parsing json input) 7313 7314 obj = serializers.loads_json(data, unicode_keys=False) 7315 7316 :pool_size: How many open connections to make to the database object. 7317 :folder: where .table files will be created. 7318 automatically set within web2py 7319 use an explicit path when using DAL outside web2py 7320 :db_codec: string encoding of the database (default: 'UTF-8') 7321 :check_reserved: list of adapters to check tablenames and column names 7322 against sql/nosql reserved keywords. (Default None) 7323 7324 * 'common' List of sql keywords that are common to all database types 7325 such as "SELECT, INSERT". (recommended) 7326 * 'all' Checks against all known SQL keywords. (not recommended) 7327 <adaptername> Checks against the specific adapters list of keywords 7328 (recommended) 7329 * '<adaptername>_nonreserved' Checks against the specific adapters 7330 list of nonreserved keywords. (if available) 7331 :migrate (defaults to True) sets default migrate behavior for all tables 7332 :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables 7333 :migrate_enabled (defaults to True). If set to False disables ALL migrations 7334 :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables 7335 :attempts (defaults to 5). Number of times to attempt connecting 7336 :auto_import (defaults to False). If set, import automatically table definitions from the 7337 databases folder 7338 :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields 7339 :lazy_tables (defaults to False): delay table definition until table access 7340 :after_connection (defaults to None): a callable that will be execute after the connection 7341 """ 7342 7343 items = None 7344 if isinstance(uri, dict): 7345 if "items" in uri: 7346 items = uri.pop("items") 7347 try: 7348 newuri = uri.pop("uri") 7349 except KeyError: 7350 newuri = DEFAULT_URI 7351 locals().update(uri) 7352 uri = newuri 7353 7354 if uri == '<zombie>' and db_uid is not None: return 7355 if not decode_credentials: 7356 credential_decoder = lambda cred: cred 7357 else: 7358 credential_decoder = lambda cred: urllib.unquote(cred) 7359 self._folder = folder 7360 if folder: 7361 self.set_folder(folder) 7362 self._uri = uri 7363 self._pool_size = pool_size 7364 self._db_codec = db_codec 7365 self._lastsql = '' 7366 self._timings = [] 7367 self._pending_references = {} 7368 self._request_tenant = 'request_tenant' 7369 self._common_fields = [] 7370 self._referee_name = '%(table)s' 7371 self._bigint_id = bigint_id 7372 self._debug = debug 7373 self._migrated = [] 7374 self._LAZY_TABLES = {} 7375 self._lazy_tables = lazy_tables 7376 self._tables = SQLCallableList() 7377 self._driver_args = driver_args 7378 self._adapter_args = adapter_args 7379 self._check_reserved = check_reserved 7380 self._decode_credentials = decode_credentials 7381 self._attempts = attempts 7382 self._do_connect = do_connect 7383 7384 if not str(attempts).isdigit() or attempts < 0: 7385 attempts = 5 7386 if uri: 7387 uris = isinstance(uri,(list,tuple)) and uri or [uri] 7388 error = '' 7389 connected = False 7390 for k in range(attempts): 7391 for uri in uris: 7392 try: 7393 if is_jdbc and not uri.startswith('jdbc:'): 7394 uri = 'jdbc:'+uri 7395 self._dbname = REGEX_DBNAME.match(uri).group() 7396 if not self._dbname in ADAPTERS: 7397 raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname) 7398 # notice that driver args or {} else driver_args 7399 # defaults to {} global, not correct 7400 kwargs = dict(db=self,uri=uri, 7401 pool_size=pool_size, 7402 folder=folder, 7403 db_codec=db_codec, 7404 credential_decoder=credential_decoder, 7405 driver_args=driver_args or {}, 7406 adapter_args=adapter_args or {}, 7407 do_connect=do_connect, 7408 after_connection=after_connection) 7409 self._adapter = ADAPTERS[self._dbname](**kwargs) 7410 types = ADAPTERS[self._dbname].types 7411 # copy so multiple DAL() possible 7412 self._adapter.types = copy.copy(types) 7413 if bigint_id: 7414 if 'big-id' in types and 'reference' in types: 7415 self._adapter.types['id'] = types['big-id'] 7416 self._adapter.types['reference'] = types['big-reference'] 7417 connected = True 7418 break 7419 except SyntaxError: 7420 raise 7421 except Exception: 7422 tb = traceback.format_exc() 7423 sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb)) 7424 if connected: 7425 break 7426 else: 7427 time.sleep(1) 7428 if not connected: 7429 raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb)) 7430 else: 7431 self._adapter = BaseAdapter(db=self,pool_size=0, 7432 uri='None',folder=folder, 7433 db_codec=db_codec, after_connection=after_connection) 7434 migrate = fake_migrate = False 7435 adapter = self._adapter 7436 self._uri_hash = hashlib_md5(adapter.uri).hexdigest() 7437 self.check_reserved = check_reserved 7438 if self.check_reserved: 7439 from reserved_sql_keywords import ADAPTERS as RSK 7440 self.RSK = RSK 7441 self._migrate = migrate 7442 self._fake_migrate = fake_migrate 7443 self._migrate_enabled = migrate_enabled 7444 self._fake_migrate_all = fake_migrate_all 7445 if auto_import or items: 7446 self.import_table_definitions(adapter.folder, 7447 items=items)
7448 7449 @property
7450 - def tables(self):
7451 return self._tables
7452
7453 - def import_table_definitions(self, path, migrate=False, 7454 fake_migrate=False, items=None):
7455 pattern = pjoin(path,self._uri_hash+'_*.table') 7456 if items: 7457 for tablename, table in items.iteritems(): 7458 # TODO: read all field/table options 7459 fields = [] 7460 # remove unsupported/illegal Table arguments 7461 [table.pop(name) for name in ("name", "fields") if 7462 name in table] 7463 if "items" in table: 7464 for fieldname, field in table.pop("items").iteritems(): 7465 # remove unsupported/illegal Field arguments 7466 [field.pop(key) for key in ("requires", "name", 7467 "compute", "colname") if key in field] 7468 fields.append(Field(str(fieldname), **field)) 7469 self.define_table(str(tablename), *fields, **table) 7470 else: 7471 for filename in glob.glob(pattern): 7472 tfile = self._adapter.file_open(filename, 'r') 7473 try: 7474 sql_fields = pickle.load(tfile) 7475 name = filename[len(pattern)-7:-6] 7476 mf = [(value['sortable'], 7477 Field(key, 7478 type=value['type'], 7479 length=value.get('length',None), 7480 notnull=value.get('notnull',False), 7481 unique=value.get('unique',False))) \ 7482 for key, value in sql_fields.iteritems()] 7483 mf.sort(lambda a,b: cmp(a[0],b[0])) 7484 self.define_table(name,*[item[1] for item in mf], 7485 **dict(migrate=migrate, 7486 fake_migrate=fake_migrate)) 7487 finally: 7488 self._adapter.file_close(tfile)
7489
7490 - def check_reserved_keyword(self, name):
7491 """ 7492 Validates ``name`` against SQL keywords 7493 Uses self.check_reserve which is a list of 7494 operators to use. 7495 self.check_reserved 7496 ['common', 'postgres', 'mysql'] 7497 self.check_reserved 7498 ['all'] 7499 """ 7500 for backend in self.check_reserved: 7501 if name.upper() in self.RSK[backend]: 7502 raise SyntaxError( 7503 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7504
7505 - def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
7506 """ 7507 EXAMPLE: 7508 7509 db.define_table('person',Field('name'),Field('info')) 7510 db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info')) 7511 7512 @request.restful() 7513 def index(): 7514 def GET(*args,**vars): 7515 patterns = [ 7516 "/friends[person]", 7517 "/{person.name}/:field", 7518 "/{person.name}/pets[pet.ownedby]", 7519 "/{person.name}/pets[pet.ownedby]/{pet.name}", 7520 "/{person.name}/pets[pet.ownedby]/{pet.name}/:field", 7521 ("/dogs[pet]", db.pet.info=='dog'), 7522 ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'), 7523 ] 7524 parser = db.parse_as_rest(patterns,args,vars) 7525 if parser.status == 200: 7526 return dict(content=parser.response) 7527 else: 7528 raise HTTP(parser.status,parser.error) 7529 7530 def POST(table_name,**vars): 7531 if table_name == 'person': 7532 return db.person.validate_and_insert(**vars) 7533 elif table_name == 'pet': 7534 return db.pet.validate_and_insert(**vars) 7535 else: 7536 raise HTTP(400) 7537 return locals() 7538 """ 7539 7540 db = self 7541 re1 = REGEX_SEARCH_PATTERN 7542 re2 = REGEX_SQUARE_BRACKETS 7543 7544 def auto_table(table,base='',depth=0): 7545 patterns = [] 7546 for field in db[table].fields: 7547 if base: 7548 tag = '%s/%s' % (base,field.replace('_','-')) 7549 else: 7550 tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) 7551 f = db[table][field] 7552 if not f.readable: continue 7553 if f.type=='id' or 'slug' in field or f.type.startswith('reference'): 7554 tag += '/{%s.%s}' % (table,field) 7555 patterns.append(tag) 7556 patterns.append(tag+'/:field') 7557 elif f.type.startswith('boolean'): 7558 tag += '/{%s.%s}' % (table,field) 7559 patterns.append(tag) 7560 patterns.append(tag+'/:field') 7561 elif f.type in ('float','double','integer','bigint'): 7562 tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) 7563 patterns.append(tag) 7564 patterns.append(tag+'/:field') 7565 elif f.type.startswith('list:'): 7566 tag += '/{%s.%s.contains}' % (table,field) 7567 patterns.append(tag) 7568 patterns.append(tag+'/:field') 7569 elif f.type in ('date','datetime'): 7570 tag+= '/{%s.%s.year}' % (table,field) 7571 patterns.append(tag) 7572 patterns.append(tag+'/:field') 7573 tag+='/{%s.%s.month}' % (table,field) 7574 patterns.append(tag) 7575 patterns.append(tag+'/:field') 7576 tag+='/{%s.%s.day}' % (table,field) 7577 patterns.append(tag) 7578 patterns.append(tag+'/:field') 7579 if f.type in ('datetime','time'): 7580 tag+= '/{%s.%s.hour}' % (table,field) 7581 patterns.append(tag) 7582 patterns.append(tag+'/:field') 7583 tag+='/{%s.%s.minute}' % (table,field) 7584 patterns.append(tag) 7585 patterns.append(tag+'/:field') 7586 tag+='/{%s.%s.second}' % (table,field) 7587 patterns.append(tag) 7588 patterns.append(tag+'/:field') 7589 if depth>0: 7590 for f in db[table]._referenced_by: 7591 tag+='/%s[%s.%s]' % (table,f.tablename,f.name) 7592 patterns.append(tag) 7593 patterns += auto_table(table,base=tag,depth=depth-1) 7594 return patterns
7595 7596 if patterns == 'auto': 7597 patterns=[] 7598 for table in db.tables: 7599 if not table.startswith('auth_'): 7600 patterns.append('/%s[%s]' % (table,table)) 7601 patterns += auto_table(table,base='',depth=1) 7602 else: 7603 i = 0 7604 while i<len(patterns): 7605 pattern = patterns[i] 7606 if not isinstance(pattern,str): 7607 pattern = pattern[0] 7608 tokens = pattern.split('/') 7609 if tokens[-1].startswith(':auto') and re2.match(tokens[-1]): 7610 new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1], 7611 '/'.join(tokens[:-1])) 7612 patterns = patterns[:i]+new_patterns+patterns[i+1:] 7613 i += len(new_patterns) 7614 else: 7615 i += 1 7616 if '/'.join(args) == 'patterns': 7617 return Row({'status':200,'pattern':'list', 7618 'error':None,'response':patterns}) 7619 for pattern in patterns: 7620 basequery, exposedfields = None, [] 7621 if isinstance(pattern,tuple): 7622 if len(pattern)==2: 7623 pattern, basequery = pattern 7624 elif len(pattern)>2: 7625 pattern, basequery, exposedfields = pattern[0:3] 7626 otable=table=None 7627 if not isinstance(queries,dict): 7628 dbset=db(queries) 7629 if basequery is not None: 7630 dbset = dbset(basequery) 7631 i=0 7632 tags = pattern[1:].split('/') 7633 if len(tags)!=len(args): 7634 continue 7635 for tag in tags: 7636 if re1.match(tag): 7637 # print 're1:'+tag 7638 tokens = tag[1:-1].split('.') 7639 table, field = tokens[0], tokens[1] 7640 if not otable or table == otable: 7641 if len(tokens)==2 or tokens[2]=='eq': 7642 query = db[table][field]==args[i] 7643 elif tokens[2]=='ne': 7644 query = db[table][field]!=args[i] 7645 elif tokens[2]=='lt': 7646 query = db[table][field]<args[i] 7647 elif tokens[2]=='gt': 7648 query = db[table][field]>args[i] 7649 elif tokens[2]=='ge': 7650 query = db[table][field]>=args[i] 7651 elif tokens[2]=='le': 7652 query = db[table][field]<=args[i] 7653 elif tokens[2]=='year': 7654 query = db[table][field].year()==args[i] 7655 elif tokens[2]=='month': 7656 query = db[table][field].month()==args[i] 7657 elif tokens[2]=='day': 7658 query = db[table][field].day()==args[i] 7659 elif tokens[2]=='hour': 7660 query = db[table][field].hour()==args[i] 7661 elif tokens[2]=='minute': 7662 query = db[table][field].minutes()==args[i] 7663 elif tokens[2]=='second': 7664 query = db[table][field].seconds()==args[i] 7665 elif tokens[2]=='startswith': 7666 query = db[table][field].startswith(args[i]) 7667 elif tokens[2]=='contains': 7668 query = db[table][field].contains(args[i]) 7669 else: 7670 raise RuntimeError("invalid pattern: %s" % pattern) 7671 if len(tokens)==4 and tokens[3]=='not': 7672 query = ~query 7673 elif len(tokens)>=4: 7674 raise RuntimeError("invalid pattern: %s" % pattern) 7675 if not otable and isinstance(queries,dict): 7676 dbset = db(queries[table]) 7677 if basequery is not None: 7678 dbset = dbset(basequery) 7679 dbset=dbset(query) 7680 else: 7681 raise RuntimeError("missing relation in pattern: %s" % pattern) 7682 elif re2.match(tag) and args[i]==tag[:tag.find('[')]: 7683 ref = tag[tag.find('[')+1:-1] 7684 if '.' in ref and otable: 7685 table,field = ref.split('.') 7686 selfld = '_id' 7687 if db[table][field].type.startswith('reference '): 7688 refs = [ x.name for x in db[otable] if x.type == db[table][field].type ] 7689 else: 7690 refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ] 7691 if refs: 7692 selfld = refs[0] 7693 if nested_select: 7694 try: 7695 dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld]))) 7696 except ValueError: 7697 return Row({'status':400,'pattern':pattern, 7698 'error':'invalid path','response':None}) 7699 else: 7700 items = [item.id for item in dbset.select(db[otable][selfld])] 7701 dbset=db(db[table][field].belongs(items)) 7702 else: 7703 table = ref 7704 if not otable and isinstance(queries,dict): 7705 dbset = db(queries[table]) 7706 dbset=dbset(db[table]) 7707 elif tag==':field' and table: 7708 # print 're3:'+tag 7709 field = args[i] 7710 if not field in db[table]: break 7711 # hand-built patterns should respect .readable=False as well 7712 if not db[table][field].readable: 7713 return Row({'status':418,'pattern':pattern, 7714 'error':'I\'m a teapot','response':None}) 7715 try: 7716 distinct = vars.get('distinct', False) == 'True' 7717 offset = long(vars.get('offset',None) or 0) 7718 limits = (offset,long(vars.get('limit',None) or 1000)+offset) 7719 except ValueError: 7720 return Row({'status':400,'error':'invalid limits','response':None}) 7721 items = dbset.select(db[table][field], distinct=distinct, limitby=limits) 7722 if items: 7723 return Row({'status':200,'response':items, 7724 'pattern':pattern}) 7725 else: 7726 return Row({'status':404,'pattern':pattern, 7727 'error':'no record found','response':None}) 7728 elif tag != args[i]: 7729 break 7730 otable = table 7731 i += 1 7732 if i==len(tags) and table: 7733 ofields = vars.get('order',db[table]._id.name).split('|') 7734 try: 7735 orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields] 7736 except (KeyError, AttributeError): 7737 return Row({'status':400,'error':'invalid orderby','response':None}) 7738 if exposedfields: 7739 fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable] 7740 else: 7741 fields = [field for field in db[table] if field.readable] 7742 count = dbset.count() 7743 try: 7744 offset = long(vars.get('offset',None) or 0) 7745 limits = (offset,long(vars.get('limit',None) or 1000)+offset) 7746 except ValueError: 7747 return Row({'status':400,'error':'invalid limits','response':None}) 7748 if count > limits[1]-limits[0]: 7749 return Row({'status':400,'error':'too many records','response':None}) 7750 try: 7751 response = dbset.select(limitby=limits,orderby=orderby,*fields) 7752 except ValueError: 7753 return Row({'status':400,'pattern':pattern, 7754 'error':'invalid path','response':None}) 7755 return Row({'status':200,'response':response, 7756 'pattern':pattern,'count':count}) 7757 return Row({'status':400,'error':'no matching pattern','response':None})
7758
7759 - def define_table( 7760 self, 7761 tablename, 7762 *fields, 7763 **args 7764 ):
7765 if not isinstance(tablename,str): 7766 raise SyntaxError("missing table name") 7767 elif hasattr(self,tablename) or tablename in self.tables: 7768 if not args.get('redefine',False): 7769 raise SyntaxError('table already defined: %s' % tablename) 7770 elif tablename.startswith('_') or hasattr(self,tablename) or \ 7771 REGEX_PYTHON_KEYWORDS.match(tablename): 7772 raise SyntaxError('invalid table name: %s' % tablename) 7773 elif self.check_reserved: 7774 self.check_reserved_keyword(tablename) 7775 else: 7776 invalid_args = set(args)-TABLE_ARGS 7777 if invalid_args: 7778 raise SyntaxError('invalid table "%s" attributes: %s' \ 7779 % (tablename,invalid_args)) 7780 if self._lazy_tables and not tablename in self._LAZY_TABLES: 7781 self._LAZY_TABLES[tablename] = (tablename,fields,args) 7782 table = None 7783 else: 7784 table = self.lazy_define_table(tablename,*fields,**args) 7785 if not tablename in self.tables: 7786 self.tables.append(tablename) 7787 return table
7788
7789 - def lazy_define_table( 7790 self, 7791 tablename, 7792 *fields, 7793 **args 7794 ):
7795 args_get = args.get 7796 common_fields = self._common_fields 7797 if common_fields: 7798 fields = list(fields) + list(common_fields) 7799 7800 table_class = args_get('table_class',Table) 7801 table = table_class(self, tablename, *fields, **args) 7802 table._actual = True 7803 self[tablename] = table 7804 # must follow above line to handle self references 7805 table._create_references() 7806 for field in table: 7807 if field.requires == DEFAULT: 7808 field.requires = sqlhtml_validators(field) 7809 7810 migrate = self._migrate_enabled and args_get('migrate',self._migrate) 7811 if migrate and not self._uri in (None,'None') \ 7812 or self._adapter.dbengine=='google:datastore': 7813 fake_migrate = self._fake_migrate_all or \ 7814 args_get('fake_migrate',self._fake_migrate) 7815 polymodel = args_get('polymodel',None) 7816 try: 7817 GLOBAL_LOCKER.acquire() 7818 self._lastsql = self._adapter.create_table( 7819 table,migrate=migrate, 7820 fake_migrate=fake_migrate, 7821 polymodel=polymodel) 7822 finally: 7823 GLOBAL_LOCKER.release() 7824 else: 7825 table._dbt = None 7826 on_define = args_get('on_define',None) 7827 if on_define: on_define(table) 7828 return table
7829
7830 - def as_dict(self, flat=False, sanitize=True, field_options=True):
7831 dbname = db_uid = uri = None 7832 if not sanitize: 7833 uri, dbname, db_uid = (self._uri, self._dbname, self._db_uid) 7834 db_as_dict = dict(items={}, tables=[], uri=uri, dbname=dbname, 7835 db_uid=db_uid, 7836 **dict([(k, getattr(self, "_" + k)) for 7837 k in 'pool_size','folder','db_codec', 7838 'check_reserved','migrate','fake_migrate', 7839 'migrate_enabled','fake_migrate_all', 7840 'decode_credentials','driver_args', 7841 'adapter_args', 'attempts', 7842 'bigint_id','debug','lazy_tables', 7843 'do_connect'])) 7844 7845 for table in self: 7846 tablename = str(table) 7847 db_as_dict["tables"].append(tablename) 7848 db_as_dict["items"][tablename] = table.as_dict(flat=flat, 7849 sanitize=sanitize, 7850 field_options=field_options) 7851 return db_as_dict
7852
7853 - def as_xml(self, sanitize=True, field_options=True):
7854 if not have_serializers: 7855 raise ImportError("No xml serializers available") 7856 d = self.as_dict(flat=True, sanitize=sanitize, 7857 field_options=field_options) 7858 return serializers.xml(d)
7859
7860 - def as_json(self, sanitize=True, field_options=True):
7861 if not have_serializers: 7862 raise ImportError("No json serializers available") 7863 d = self.as_dict(flat=True, sanitize=sanitize, 7864 field_options=field_options) 7865 return serializers.json(d)
7866
7867 - def as_yaml(self, sanitize=True, field_options=True):
7868 if not have_serializers: 7869 raise ImportError("No YAML serializers available") 7870 d = self.as_dict(flat=True, sanitize=sanitize, 7871 field_options=field_options) 7872 return serializers.yaml(d)
7873
7874 - def __contains__(self, tablename):
7875 try: 7876 return tablename in self.tables 7877 except AttributeError: 7878 # The instance has no .tables attribute yet 7879 return False
7880 7881 has_key = __contains__ 7882
7883 - def get(self,key,default=None):
7884 return self.__dict__.get(key,default)
7885
7886 - def __iter__(self):
7887 for tablename in self.tables: 7888 yield self[tablename]
7889
7890 - def __getitem__(self, key):
7891 return self.__getattr__(str(key))
7892
7893 - def __getattr__(self, key):
7894 if ogetattr(self,'_lazy_tables') and \ 7895 key in ogetattr(self,'_LAZY_TABLES'): 7896 tablename, fields, args = self._LAZY_TABLES.pop(key) 7897 return self.lazy_define_table(tablename,*fields,**args) 7898 return ogetattr(self, key)
7899
7900 - def __setitem__(self, key, value):
7901 osetattr(self, str(key), value)
7902
7903 - def __setattr__(self, key, value):
7904 if key[:1]!='_' and key in self: 7905 raise SyntaxError( 7906 'Object %s exists and cannot be redefined' % key) 7907 osetattr(self,key,value)
7908 7909 __delitem__ = object.__delattr__ 7910
7911 - def __repr__(self):
7912 if hasattr(self,'_uri'): 7913 return '<DAL uri="%s">' % hide_password(str(self._uri)) 7914 else: 7915 return '<DAL db_uid="%s">' % self._db_uid
7916
7917 - def smart_query(self,fields,text):
7918 return Set(self, smart_query(fields,text))
7919
7920 - def __call__(self, query=None, ignore_common_filters=None):
7921 if isinstance(query,Table): 7922 query = self._adapter.id_query(query) 7923 elif isinstance(query,Field): 7924 query = query!=None 7925 elif isinstance(query, dict): 7926 icf = query.get("ignore_common_filters") 7927 if icf: ignore_common_filters = icf 7928 return Set(self, query, ignore_common_filters=ignore_common_filters)
7929
7930 - def commit(self):
7931 self._adapter.commit()
7932
7933 - def rollback(self):
7934 self._adapter.rollback()
7935
7936 - def close(self):
7937 self._adapter.close() 7938 if self._db_uid in THREAD_LOCAL.db_instances: 7939 db_group = THREAD_LOCAL.db_instances[self._db_uid] 7940 db_group.remove(self) 7941 if not db_group: 7942 del THREAD_LOCAL.db_instances[self._db_uid]
7943
7944 - def executesql(self, query, placeholders=None, as_dict=False, 7945 fields=None, colnames=None):
7946 """ 7947 placeholders is optional and will always be None. 7948 If using raw SQL with placeholders, placeholders may be 7949 a sequence of values to be substituted in 7950 or, (if supported by the DB driver), a dictionary with keys 7951 matching named placeholders in your SQL. 7952 7953 Added 2009-12-05 "as_dict" optional argument. Will always be 7954 None when using DAL. If using raw SQL can be set to True 7955 and the results cursor returned by the DB driver will be 7956 converted to a sequence of dictionaries keyed with the db 7957 field names. Tested with SQLite but should work with any database 7958 since the cursor.description used to get field names is part of the 7959 Python dbi 2.0 specs. Results returned with as_dict=True are 7960 the same as those returned when applying .to_list() to a DAL query. 7961 7962 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 7963 7964 Added 2012-08-24 "fields" and "colnames" optional arguments. If either 7965 is provided, the results cursor returned by the DB driver will be 7966 converted to a DAL Rows object using the db._adapter.parse() method. 7967 7968 The "fields" argument is a list of DAL Field objects that match the 7969 fields returned from the DB. The Field objects should be part of one or 7970 more Table objects defined on the DAL object. The "fields" list can 7971 include one or more DAL Table objects in addition to or instead of 7972 including Field objects, or it can be just a single table (not in a 7973 list). In that case, the Field objects will be extracted from the 7974 table(s). 7975 7976 Instead of specifying the "fields" argument, the "colnames" argument 7977 can be specified as a list of field names in tablename.fieldname format. 7978 Again, these should represent tables and fields defined on the DAL 7979 object. 7980 7981 It is also possible to specify both "fields" and the associated 7982 "colnames". In that case, "fields" can also include DAL Expression 7983 objects in addition to Field objects. For Field objects in "fields", 7984 the associated "colnames" must still be in tablename.fieldname format. 7985 For Expression objects in "fields", the associated "colnames" can 7986 be any arbitrary labels. 7987 7988 Note, the DAL Table objects referred to by "fields" or "colnames" can 7989 be dummy tables and do not have to represent any real tables in the 7990 database. Also, note that the "fields" and "colnames" must be in the 7991 same order as the fields in the results cursor returned from the DB. 7992 """ 7993 adapter = self._adapter 7994 if placeholders: 7995 adapter.execute(query, placeholders) 7996 else: 7997 adapter.execute(query) 7998 if as_dict: 7999 if not hasattr(adapter.cursor,'description'): 8000 raise RuntimeError("database does not support executesql(...,as_dict=True)") 8001 # Non-DAL legacy db query, converts cursor results to dict. 8002 # sequence of 7-item sequences. each sequence tells about a column. 8003 # first item is always the field name according to Python Database API specs 8004 columns = adapter.cursor.description 8005 # reduce the column info down to just the field names 8006 fields = [f[0] for f in columns] 8007 # will hold our finished resultset in a list 8008 data = adapter._fetchall() 8009 # convert the list for each row into a dictionary so it's 8010 # easier to work with. row['field_name'] rather than row[0] 8011 return [dict(zip(fields,row)) for row in data] 8012 try: 8013 data = adapter._fetchall() 8014 except: 8015 return None 8016 if fields or colnames: 8017 fields = [] if fields is None else fields 8018 if not isinstance(fields, list): 8019 fields = [fields] 8020 extracted_fields = [] 8021 for field in fields: 8022 if isinstance(field, Table): 8023 extracted_fields.extend([f for f in field]) 8024 else: 8025 extracted_fields.append(field) 8026 if not colnames: 8027 colnames = ['%s.%s' % (f.tablename, f.name) 8028 for f in extracted_fields] 8029 data = adapter.parse( 8030 data, fields=extracted_fields, colnames=colnames) 8031 return data
8032
8033 - def _remove_references_to(self, thistable):
8034 for table in self: 8035 table._referenced_by = [field for field in table._referenced_by 8036 if not field.table==thistable]
8037
8038 - def export_to_csv_file(self, ofile, *args, **kwargs):
8039 step = long(kwargs.get('max_fetch_rows,',500)) 8040 write_colnames = kwargs['write_colnames'] = \ 8041 kwargs.get("write_colnames", True) 8042 for table in self.tables: 8043 ofile.write('TABLE %s\r\n' % table) 8044 query = self._adapter.id_query(self[table]) 8045 nrows = self(query).count() 8046 kwargs['write_colnames'] = write_colnames 8047 for k in range(0,nrows,step): 8048 self(query).select(limitby=(k,k+step)).export_to_csv_file( 8049 ofile, *args, **kwargs) 8050 kwargs['write_colnames'] = False 8051 ofile.write('\r\n\r\n') 8052 ofile.write('END')
8053
8054 - def import_from_csv_file(self, ifile, id_map=None, null='<NULL>', 8055 unique='uuid', map_tablenames=None, 8056 ignore_missing_tables=False, 8057 *args, **kwargs):
8058 #if id_map is None: id_map={} 8059 id_offset = {} # only used if id_map is None 8060 map_tablenames = map_tablenames or {} 8061 for line in ifile: 8062 line = line.strip() 8063 if not line: 8064 continue 8065 elif line == 'END': 8066 return 8067 elif not line.startswith('TABLE ') or \ 8068 not line[6:] in self.tables: 8069 raise SyntaxError('invalid file format') 8070 else: 8071 tablename = line[6:] 8072 tablename = map_tablenames.get(tablename,tablename) 8073 if tablename is not None and tablename in self.tables: 8074 self[tablename].import_from_csv_file( 8075 ifile, id_map, null, unique, id_offset, 8076 *args, **kwargs) 8077 elif tablename is None or ignore_missing_tables: 8078 # skip all non-empty lines 8079 for line in ifile: 8080 if not line.strip(): 8081 breal 8082 else: 8083 raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
8084
8085 8086 -def DAL_unpickler(db_uid):
8087 return DAL('<zombie>',db_uid=db_uid)
8088
8089 -def DAL_pickler(db):
8090 return DAL_unpickler, (db._db_uid,)
8091 8092 copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
8093 8094 -class SQLALL(object):
8095 """ 8096 Helper class providing a comma-separated string having all the field names 8097 (prefixed by table name and '.') 8098 8099 normally only called from within gluon.sql 8100 """ 8101
8102 - def __init__(self, table):
8103 self._table = table
8104
8105 - def __str__(self):
8106 return ', '.join([str(field) for field in self._table])
8107
8108 # class Reference(int): 8109 -class Reference(long):
8110
8111 - def __allocate(self):
8112 if not self._record: 8113 self._record = self._table[long(self)] 8114 if not self._record: 8115 raise RuntimeError( 8116 "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))
8117
8118 - def __getattr__(self, key):
8119 if key == 'id': 8120 return long(self) 8121 self.__allocate() 8122 return self._record.get(key, None)
8123
8124 - def get(self, key, default=None):
8125 return self.__getattr__(key, default)
8126
8127 - def __setattr__(self, key, value):
8128 if key.startswith('_'): 8129 long.__setattr__(self, key, value) 8130 return 8131 self.__allocate() 8132 self._record[key] = value
8133
8134 - def __getitem__(self, key):
8135 if key == 'id': 8136 return long(self) 8137 self.__allocate() 8138 return self._record.get(key, None)
8139
8140 - def __setitem__(self,key,value):
8141 self.__allocate() 8142 self._record[key] = value
8143
8144 8145 -def Reference_unpickler(data):
8146 return marshal.loads(data)
8147
8148 -def Reference_pickler(data):
8149 try: 8150 marshal_dump = marshal.dumps(long(data)) 8151 except AttributeError: 8152 marshal_dump = 'i%s' % struct.pack('<i', long(data)) 8153 return (Reference_unpickler, (marshal_dump,))
8154 8155 copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
8156 8157 -class MethodAdder(object):
8158 - def __init__(self,table):
8159 self.table = table
8160 - def __call__(self):
8161 return self.register()
8162 - def __getattr__(self,method_name):
8163 return self.register(method_name)
8164 - def register(self,method_name=None):
8165 def _decorated(f): 8166 instance = self.table 8167 import types 8168 method = types.MethodType(f, instance, instance.__class__) 8169 name = method_name or f.func_name 8170 setattr(instance, name, method) 8171 return f
8172 return _decorated
8173
8174 -class Table(object):
8175 8176 """ 8177 an instance of this class represents a database table 8178 8179 Example:: 8180 8181 db = DAL(...) 8182 db.define_table('users', Field('name')) 8183 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8184 db.users.drop() 8185 """ 8186
8187 - def __init__( 8188 self, 8189 db, 8190 tablename, 8191 *fields, 8192 **args 8193 ):
8194 """ 8195 Initializes the table and performs checking on the provided fields. 8196 8197 Each table will have automatically an 'id'. 8198 8199 If a field is of type Table, the fields (excluding 'id') from that table 8200 will be used instead. 8201 8202 :raises SyntaxError: when a supplied field is of incorrect type. 8203 """ 8204 self._actual = False # set to True by define_table() 8205 self._tablename = tablename 8206 self._ot = args.get('actual_name') 8207 self._sequence_name = args.get('sequence_name') or \ 8208 db and db._adapter.sequence_name(tablename) 8209 self._trigger_name = args.get('trigger_name') or \ 8210 db and db._adapter.trigger_name(tablename) 8211 self._common_filter = args.get('common_filter') 8212 self._format = args.get('format') 8213 self._singular = args.get( 8214 'singular',tablename.replace('_',' ').capitalize()) 8215 self._plural = args.get( 8216 'plural',pluralize(self._singular.lower()).capitalize()) 8217 # horrible but for backard compatibility of appamdin: 8218 if 'primarykey' in args and args['primarykey'] is not None: 8219 self._primarykey = args.get('primarykey') 8220 8221 self._before_insert = [] 8222 self._before_update = [Set.delete_uploaded_files] 8223 self._before_delete = [Set.delete_uploaded_files] 8224 self._after_insert = [] 8225 self._after_update = [] 8226 self._after_delete = [] 8227 8228 self.add_method = MethodAdder(self) 8229 8230 fieldnames,newfields=set(),[] 8231 if hasattr(self,'_primarykey'): 8232 if not isinstance(self._primarykey,list): 8233 raise SyntaxError( 8234 "primarykey must be a list of fields from table '%s'" \ 8235 % tablename) 8236 if len(self._primarykey)==1: 8237 self._id = [f for f in fields if isinstance(f,Field) \ 8238 and f.name==self._primarykey[0]][0] 8239 elif not [f for f in fields if isinstance(f,Field) and f.type=='id']: 8240 field = Field('id', 'id') 8241 newfields.append(field) 8242 fieldnames.add('id') 8243 self._id = field 8244 virtual_fields = [] 8245 for field in fields: 8246 if isinstance(field, (FieldMethod, FieldVirtual)): 8247 virtual_fields.append(field) 8248 elif isinstance(field, Field) and not field.name in fieldnames: 8249 if field.db is not None: 8250 field = copy.copy(field) 8251 newfields.append(field) 8252 fieldnames.add(field.name) 8253 if field.type=='id': 8254 self._id = field 8255 elif isinstance(field, Table): 8256 table = field 8257 for field in table: 8258 if not field.name in fieldnames and not field.type=='id': 8259 t2 = not table._actual and self._tablename 8260 field = field.clone(point_self_references_to=t2) 8261 newfields.append(field) 8262 fieldnames.add(field.name) 8263 elif not isinstance(field, (Field, Table)): 8264 raise SyntaxError( 8265 'define_table argument is not a Field or Table: %s' % field) 8266 fields = newfields 8267 self._db = db 8268 tablename = tablename 8269 self._fields = SQLCallableList() 8270 self.virtualfields = [] 8271 fields = list(fields) 8272 8273 if db and db._adapter.uploads_in_blob==True: 8274 uploadfields = [f.name for f in fields if f.type=='blob'] 8275 for field in fields: 8276 fn = field.uploadfield 8277 if isinstance(field, Field) and field.type == 'upload'\ 8278 and fn is True: 8279 fn = field.uploadfield = '%s_blob' % field.name 8280 if isinstance(fn,str) and not fn in uploadfields: 8281 fields.append(Field(fn,'blob',default='', 8282 writable=False,readable=False)) 8283 8284 lower_fieldnames = set() 8285 reserved = dir(Table) + ['fields'] 8286 for field in fields: 8287 field_name = field.name 8288 if db and db.check_reserved: 8289 db.check_reserved_keyword(field_name) 8290 elif field_name in reserved: 8291 raise SyntaxError("field name %s not allowed" % field_name) 8292 8293 if field_name.lower() in lower_fieldnames: 8294 raise SyntaxError("duplicate field %s in table %s" \ 8295 % (field_name, tablename)) 8296 else: 8297 lower_fieldnames.add(field_name.lower()) 8298 8299 self.fields.append(field_name) 8300 self[field_name] = field 8301 if field.type == 'id': 8302 self['id'] = field 8303 field.tablename = field._tablename = tablename 8304 field.table = field._table = self 8305 field.db = field._db = db 8306 self.ALL = SQLALL(self) 8307 8308 if hasattr(self,'_primarykey'): 8309 for k in self._primarykey: 8310 if k not in self.fields: 8311 raise SyntaxError( 8312 "primarykey must be a list of fields from table '%s " % tablename) 8313 else: 8314 self[k].notnull = True 8315 for field in virtual_fields: 8316 self[field.name] = field
8317 8318 @property
8319 - def fields(self):
8320 return self._fields
8321
8322 - def update(self,*args,**kwargs):
8323 raise RuntimeError("Syntax Not Supported")
8324
8325 - def _enable_record_versioning(self, 8326 archive_db=None, 8327 archive_name = '%(tablename)s_archive', 8328 current_record = 'current_record', 8329 is_active = 'is_active'):
8330 db = self._db 8331 archive_db = archive_db or db 8332 archive_name = archive_name % dict(tablename=self._tablename) 8333 if archive_name in archive_db.tables(): 8334 return # do not try define the archive if already exists 8335 fieldnames = self.fields() 8336 same_db = archive_db is db 8337 field_type = self if same_db else 'bigint' 8338 clones = [] 8339 for field in self: 8340 nfk = same_db or not field.type.startswith('reference') 8341 clones.append(field.clone( 8342 unique=False, type=field.type if nfk else 'bigint')) 8343 archive_db.define_table( 8344 archive_name, Field(current_record,field_type), *clones) 8345 self._before_update.append( 8346 lambda qset,fs,db=archive_db,an=archive_name,cn=current_record: 8347 archive_record(qset,fs,db[an],cn)) 8348 if is_active and is_active in fieldnames: 8349 self._before_delete.append( 8350 lambda qset: qset.update(is_active=False)) 8351 newquery = lambda query, t=self: \ 8352 reduce(AND,[db[tn].is_active == True 8353 for tn in db._adapter.tables(query) 8354 if tn==t.name or getattr(db[tn],'_ot',None)==t.name]) 8355 query = self._common_filter 8356 if query: 8357 newquery = query & newquery 8358 self._common_filter = newquery
8359
8360 - def _validate(self,**vars):
8361 errors = Row() 8362 for key,value in vars.iteritems(): 8363 value,error = self[key].validate(value) 8364 if error: 8365 errors[key] = error 8366 return errors
8367
8368 - def _create_references(self):
8369 db = self._db 8370 pr = db._pending_references 8371 self._referenced_by = [] 8372 for field in self: 8373 fieldname = field.name 8374 field_type = field.type 8375 if isinstance(field_type,str) and field_type[:10] == 'reference ': 8376 ref = field_type[10:].strip() 8377 if not ref.split(): 8378 raise SyntaxError('Table: reference to nothing: %s' %ref) 8379 refs = ref.split('.') 8380 rtablename = refs[0] 8381 if not rtablename in db: 8382 pr[rtablename] = pr.get(rtablename,[]) + [field] 8383 continue 8384 rtable = db[rtablename] 8385 if len(refs)==2: 8386 rfieldname = refs[1] 8387 if not hasattr(rtable,'_primarykey'): 8388 raise SyntaxError( 8389 'keyed tables can only reference other keyed tables (for now)') 8390 if rfieldname not in rtable.fields: 8391 raise SyntaxError( 8392 "invalid field '%s' for referenced table '%s' in table '%s'" \ 8393 % (rfieldname, rtablename, self._tablename)) 8394 rtable._referenced_by.append(field) 8395 for referee in pr.get(self._tablename,[]): 8396 self._referenced_by.append(referee)
8397
8398 - def _filter_fields(self, record, id=False):
8399 return dict([(k, v) for (k, v) in record.iteritems() if k 8400 in self.fields and (self[k].type!='id' or id)])
8401
8402 - def _build_query(self,key):
8403 """ for keyed table only """ 8404 query = None 8405 for k,v in key.iteritems(): 8406 if k in self._primarykey: 8407 if query: 8408 query = query & (self[k] == v) 8409 else: 8410 query = (self[k] == v) 8411 else: 8412 raise SyntaxError( 8413 'Field %s is not part of the primary key of %s' % \ 8414 (k,self._tablename)) 8415 return query
8416
8417 - def __getitem__(self, key):
8418 if not key: 8419 return None 8420 elif isinstance(key, dict): 8421 """ for keyed table """ 8422 query = self._build_query(key) 8423 return self._db(query).select(limitby=(0,1), orderby_on_limitby=False).first() 8424 elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key): 8425 return self._db(self._id == key).select(limitby=(0,1), orderby_on_limitby=False).first() 8426 elif key: 8427 return ogetattr(self, str(key))
8428
8429 - def __call__(self, key=DEFAULT, **kwargs):
8430 for_update = kwargs.get('_for_update',False) 8431 if '_for_update' in kwargs: del kwargs['_for_update'] 8432 8433 orderby = kwargs.get('_orderby',None) 8434 if '_orderby' in kwargs: del kwargs['_orderby'] 8435 8436 if not key is DEFAULT: 8437 if isinstance(key, Query): 8438 record = self._db(key).select( 8439 limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() 8440 elif not str(key).isdigit(): 8441 record = None 8442 else: 8443 record = self._db(self._id == key).select( 8444 limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() 8445 if record: 8446 for k,v in kwargs.iteritems(): 8447 if record[k]!=v: return None 8448 return record 8449 elif kwargs: 8450 query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()]) 8451 return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() 8452 else: 8453 return None
8454
8455 - def __setitem__(self, key, value):
8456 if isinstance(key, dict) and isinstance(value, dict): 8457 """ option for keyed table """ 8458 if set(key.keys()) == set(self._primarykey): 8459 value = self._filter_fields(value) 8460 kv = {} 8461 kv.update(value) 8462 kv.update(key) 8463 if not self.insert(**kv): 8464 query = self._build_query(key) 8465 self._db(query).update(**self._filter_fields(value)) 8466 else: 8467 raise SyntaxError( 8468 'key must have all fields from primary key: %s'%\ 8469 (self._primarykey)) 8470 elif str(key).isdigit(): 8471 if key == 0: 8472 self.insert(**self._filter_fields(value)) 8473 elif self._db(self._id == key)\ 8474 .update(**self._filter_fields(value)) is None: 8475 raise SyntaxError('No such record: %s' % key) 8476 else: 8477 if isinstance(key, dict): 8478 raise SyntaxError( 8479 'value must be a dictionary: %s' % value) 8480 osetattr(self, str(key), value)
8481 8482 __getattr__ = __getitem__ 8483
8484 - def __setattr__(self, key, value):
8485 if key[:1]!='_' and key in self: 8486 raise SyntaxError('Object exists and cannot be redefined: %s' % key) 8487 osetattr(self,key,value)
8488
8489 - def __delitem__(self, key):
8490 if isinstance(key, dict): 8491 query = self._build_query(key) 8492 if not self._db(query).delete(): 8493 raise SyntaxError('No such record: %s' % key) 8494 elif not str(key).isdigit() or \ 8495 not self._db(self._id == key).delete(): 8496 raise SyntaxError('No such record: %s' % key)
8497
8498 - def __contains__(self,key):
8499 return hasattr(self,key)
8500 8501 has_key = __contains__ 8502
8503 - def items(self):
8504 return self.__dict__.items()
8505
8506 - def __iter__(self):
8507 for fieldname in self.fields: 8508 yield self[fieldname]
8509
8510 - def iteritems(self):
8511 return self.__dict__.iteritems()
8512 8513
8514 - def __repr__(self):
8515 return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8516
8517 - def __str__(self):
8518 if self._ot is not None: 8519 ot = self._db._adapter.QUOTE_TEMPLATE % self._ot 8520 if 'Oracle' in str(type(self._db._adapter)): 8521 return '%s %s' % (ot, self._tablename) 8522 return '%s AS %s' % (ot, self._tablename) 8523 return self._tablename
8524
8525 - def _drop(self, mode = ''):
8526 return self._db._adapter._drop(self, mode)
8527
8528 - def drop(self, mode = ''):
8529 return self._db._adapter.drop(self,mode)
8530
8531 - def _listify(self,fields,update=False):
8532 new_fields = {} # format: new_fields[name] = (field,value) 8533 8534 # store all fields passed as input in new_fields 8535 for name in fields: 8536 if not name in self.fields: 8537 if name != 'id': 8538 raise SyntaxError( 8539 'Field %s does not belong to the table' % name) 8540 else: 8541 field = self[name] 8542 value = fields[name] 8543 if field.filter_in: 8544 value = field.filter_in(value) 8545 new_fields[name] = (field,value) 8546 8547 # check all fields that should be in the table but are not passed 8548 to_compute = [] 8549 for ofield in self: 8550 name = ofield.name 8551 if not name in new_fields: 8552 # if field is supposed to be computed, compute it! 8553 if ofield.compute: # save those to compute for later 8554 to_compute.append((name,ofield)) 8555 # if field is required, check its default value 8556 elif not update and not ofield.default is None: 8557 value = ofield.default 8558 fields[name] = value 8559 new_fields[name] = (ofield,value) 8560 # if this is an update, user the update field instead 8561 elif update and not ofield.update is None: 8562 value = ofield.update 8563 fields[name] = value 8564 new_fields[name] = (ofield,value) 8565 # if the field is still not there but it should, error 8566 elif not update and ofield.required: 8567 raise RuntimeError( 8568 'Table: missing required field: %s' % name) 8569 # now deal with fields that are supposed to be computed 8570 if to_compute: 8571 row = Row(fields) 8572 for name,ofield in to_compute: 8573 # try compute it 8574 try: 8575 row[name] = new_value = ofield.compute(row) 8576 new_fields[name] = (ofield, new_value) 8577 except (KeyError, AttributeError): 8578 # error silently unless field is required! 8579 if ofield.required: 8580 raise SyntaxError('unable to compute field: %s' % name) 8581 return new_fields.values()
8582
8583 - def _attempt_upload(self, fields):
8584 for field in self: 8585 if field.type=='upload' and field.name in fields: 8586 value = fields[field.name] 8587 if value and not isinstance(value,str): 8588 if hasattr(value,'file') and hasattr(value,'filename'): 8589 new_name = field.store(value.file,filename=value.filename) 8590 elif hasattr(value,'read') and hasattr(value,'name'): 8591 new_name = field.store(value,filename=value.name) 8592 else: 8593 raise RuntimeError("Unable to handle upload") 8594 fields[field.name] = new_name
8595
8596 - def _defaults(self, fields):
8597 "If there are no fields/values specified, return table defaults" 8598 if not fields: 8599 fields = {} 8600 for field in self: 8601 if field.type != "id": 8602 fields[field.name] = field.default 8603 return fields
8604
8605 - def _insert(self, **fields):
8606 fields = self._defaults(fields) 8607 return self._db._adapter._insert(self, self._listify(fields))
8608
8609 - def insert(self, **fields):
8610 fields = self._defaults(fields) 8611 self._attempt_upload(fields) 8612 if any(f(fields) for f in self._before_insert): return 0 8613 ret = self._db._adapter.insert(self, self._listify(fields)) 8614 if ret and self._after_insert: 8615 fields = Row(fields) 8616 [f(fields,ret) for f in self._after_insert] 8617 return ret
8618
8619 - def validate_and_insert(self,**fields):
8620 response = Row() 8621 response.errors = Row() 8622 new_fields = copy.copy(fields) 8623 for key,value in fields.iteritems(): 8624 value,error = self[key].validate(value) 8625 if error: 8626 response.errors[key] = "%s" % error 8627 else: 8628 new_fields[key] = value 8629 if not response.errors: 8630 response.id = self.insert(**new_fields) 8631 else: 8632 response.id = None 8633 return response
8634
8635 - def update_or_insert(self, _key=DEFAULT, **values):
8636 if _key is DEFAULT: 8637 record = self(**values) 8638 elif isinstance(_key,dict): 8639 record = self(**_key) 8640 else: 8641 record = self(_key) 8642 if record: 8643 record.update_record(**values) 8644 newid = None 8645 else: 8646 newid = self.insert(**values) 8647 return newid
8648
8649 - def bulk_insert(self, items):
8650 """ 8651 here items is a list of dictionaries 8652 """ 8653 items = [self._listify(item) for item in items] 8654 if any(f(item) for item in items for f in self._before_insert):return 0 8655 ret = self._db._adapter.bulk_insert(self,items) 8656 ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert] 8657 return ret
8658
8659 - def _truncate(self, mode = None):
8660 return self._db._adapter._truncate(self, mode)
8661
8662 - def truncate(self, mode = None):
8663 return self._db._adapter.truncate(self, mode)
8664
8665 - def import_from_csv_file( 8666 self, 8667 csvfile, 8668 id_map=None, 8669 null='<NULL>', 8670 unique='uuid', 8671 id_offset=None, # id_offset used only when id_map is None 8672 *args, **kwargs 8673 ):
8674 """ 8675 Import records from csv file. 8676 Column headers must have same names as table fields. 8677 Field 'id' is ignored. 8678 If column names read 'table.file' the 'table.' prefix is ignored. 8679 'unique' argument is a field which must be unique 8680 (typically a uuid field) 8681 'restore' argument is default False; 8682 if set True will remove old values in table first. 8683 'id_map' ff set to None will not map ids. 8684 The import will keep the id numbers in the restored table. 8685 This assumes that there is an field of type id that 8686 is integer and in incrementing order. 8687 Will keep the id numbers in restored table. 8688 """ 8689 8690 delimiter = kwargs.get('delimiter', ',') 8691 quotechar = kwargs.get('quotechar', '"') 8692 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 8693 restore = kwargs.get('restore', False) 8694 if restore: 8695 self._db[self].truncate() 8696 8697 reader = csv.reader(csvfile, delimiter=delimiter, 8698 quotechar=quotechar, quoting=quoting) 8699 colnames = None 8700 if isinstance(id_map, dict): 8701 if not self._tablename in id_map: 8702 id_map[self._tablename] = {} 8703 id_map_self = id_map[self._tablename] 8704 8705 def fix(field, value, id_map, id_offset): 8706 list_reference_s='list:reference' 8707 if value == null: 8708 value = None 8709 elif field.type=='blob': 8710 value = base64.b64decode(value) 8711 elif field.type=='double' or field.type=='float': 8712 if not value.strip(): 8713 value = None 8714 else: 8715 value = float(value) 8716 elif field.type in ('integer','bigint'): 8717 if not value.strip(): 8718 value = None 8719 else: 8720 value = long(value) 8721 elif field.type.startswith('list:string'): 8722 value = bar_decode_string(value) 8723 elif field.type.startswith(list_reference_s): 8724 ref_table = field.type[len(list_reference_s):].strip() 8725 if id_map is not None: 8726 value = [id_map[ref_table][long(v)] \ 8727 for v in bar_decode_string(value)] 8728 else: 8729 value = [v for v in bar_decode_string(value)] 8730 elif field.type.startswith('list:'): 8731 value = bar_decode_integer(value) 8732 elif id_map and field.type.startswith('reference'): 8733 try: 8734 value = id_map[field.type[9:].strip()][long(value)] 8735 except KeyError: 8736 pass 8737 elif id_offset and field.type.startswith('reference'): 8738 try: 8739 value = id_offset[field.type[9:].strip()]+long(value) 8740 except KeyError: 8741 pass 8742 return (field.name, value)
8743 8744 def is_id(colname): 8745 if colname in self: 8746 return self[colname].type == 'id' 8747 else: 8748 return False
8749 8750 first = True 8751 unique_idx = None 8752 for line in reader: 8753 if not line: 8754 break 8755 if not colnames: 8756 colnames = [x.split('.',1)[-1] for x in line][:len(line)] 8757 cols, cid = [], None 8758 for i,colname in enumerate(colnames): 8759 if is_id(colname): 8760 cid = i 8761 else: 8762 cols.append(i) 8763 if colname == unique: 8764 unique_idx = i 8765 else: 8766 items = [fix(self[colnames[i]], line[i], id_map, id_offset) \ 8767 for i in cols if colnames[i] in self.fields] 8768 8769 if not id_map and cid is not None and id_offset is not None and not unique_idx: 8770 csv_id = long(line[cid]) 8771 curr_id = self.insert(**dict(items)) 8772 if first: 8773 first = False 8774 # First curr_id is bigger than csv_id, 8775 # then we are not restoring but 8776 # extending db table with csv db table 8777 if curr_id>csv_id: 8778 id_offset[self._tablename] = curr_id-csv_id 8779 else: 8780 id_offset[self._tablename] = 0 8781 # create new id until we get the same as old_id+offset 8782 while curr_id<csv_id+id_offset[self._tablename]: 8783 self._db(self._db[self][colnames[cid]] == curr_id).delete() 8784 curr_id = self.insert(**dict(items)) 8785 # Validation. Check for duplicate of 'unique' &, 8786 # if present, update instead of insert. 8787 elif not unique_idx: 8788 new_id = self.insert(**dict(items)) 8789 else: 8790 unique_value = line[unique_idx] 8791 query = self._db[self][unique] == unique_value 8792 record = self._db(query).select().first() 8793 if record: 8794 record.update_record(**dict(items)) 8795 new_id = record[self._id.name] 8796 else: 8797 new_id = self.insert(**dict(items)) 8798 if id_map and cid is not None: 8799 id_map_self[long(line[cid])] = new_id 8800
8801 - def as_dict(self, flat=False, sanitize=True, field_options=True):
8802 tablename = str(self) 8803 table_as_dict = dict(name=tablename, items={}, fields=[], 8804 sequence_name=self._sequence_name, 8805 trigger_name=self._trigger_name, 8806 common_filter=self._common_filter, format=self._format, 8807 singular=self._singular, plural=self._plural) 8808 8809 for field in self: 8810 if (field.readable or field.writable) or (not sanitize): 8811 table_as_dict["fields"].append(field.name) 8812 table_as_dict["items"][field.name] = \ 8813 field.as_dict(flat=flat, sanitize=sanitize, 8814 options=field_options) 8815 return table_as_dict
8816
8817 - def as_xml(self, sanitize=True, field_options=True):
8818 if not have_serializers: 8819 raise ImportError("No xml serializers available") 8820 d = self.as_dict(flat=True, sanitize=sanitize, 8821 field_options=field_options) 8822 return serializers.xml(d)
8823
8824 - def as_json(self, sanitize=True, field_options=True):
8825 if not have_serializers: 8826 raise ImportError("No json serializers available") 8827 d = self.as_dict(flat=True, sanitize=sanitize, 8828 field_options=field_options) 8829 return serializers.json(d)
8830
8831 - def as_yaml(self, sanitize=True, field_options=True):
8832 if not have_serializers: 8833 raise ImportError("No YAML serializers available") 8834 d = self.as_dict(flat=True, sanitize=sanitize, 8835 field_options=field_options) 8836 return serializers.yaml(d)
8837
8838 - def with_alias(self, alias):
8839 return self._db._adapter.alias(self,alias)
8840
8841 - def on(self, query):
8842 return Expression(self._db,self._db._adapter.ON,self,query)
8843
8844 -def archive_record(qset,fs,archive_table,current_record):
8845 tablenames = qset.db._adapter.tables(qset.query) 8846 if len(tablenames)!=1: raise RuntimeError("cannot update join") 8847 table = qset.db[tablenames[0]] 8848 for row in qset.select(): 8849 fields = archive_table._filter_fields(row) 8850 fields[current_record] = row.id 8851 archive_table.insert(**fields) 8852 return False
8853
8854 8855 8856 -class Expression(object):
8857
8858 - def __init__( 8859 self, 8860 db, 8861 op, 8862 first=None, 8863 second=None, 8864 type=None, 8865 **optional_args 8866 ):
8867 8868 self.db = db 8869 self.op = op 8870 self.first = first 8871 self.second = second 8872 self._table = getattr(first,'_table',None) 8873 ### self._tablename = first._tablename ## CHECK 8874 if not type and first and hasattr(first,'type'): 8875 self.type = first.type 8876 else: 8877 self.type = type 8878 self.optional_args = optional_args
8879
8880 - def sum(self):
8881 db = self.db 8882 return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)
8883
8884 - def max(self):
8885 db = self.db 8886 return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)
8887
8888 - def min(self):
8889 db = self.db 8890 return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)
8891
8892 - def len(self):
8893 db = self.db 8894 return Expression(db, db._adapter.LENGTH, self, None, 'integer')
8895
8896 - def avg(self):
8897 db = self.db 8898 return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)
8899
8900 - def abs(self):
8901 db = self.db 8902 return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)
8903
8904 - def lower(self):
8905 db = self.db 8906 return Expression(db, db._adapter.LOWER, self, None, self.type)
8907
8908 - def upper(self):
8909 db = self.db 8910 return Expression(db, db._adapter.UPPER, self, None, self.type)
8911
8912 - def replace(self,a,b):
8913 db = self.db 8914 return Expression(db, db._adapter.REPLACE, self, (a,b), self.type)
8915
8916 - def year(self):
8917 db = self.db 8918 return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')
8919
8920 - def month(self):
8921 db = self.db 8922 return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')
8923
8924 - def day(self):
8925 db = self.db 8926 return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')
8927
8928 - def hour(self):
8929 db = self.db 8930 return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')
8931
8932 - def minutes(self):
8933 db = self.db 8934 return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')
8935
8936 - def coalesce(self,*others):
8937 db = self.db 8938 return Expression(db, db._adapter.COALESCE, self, others, self.type)
8939
8940 - def coalesce_zero(self):
8941 db = self.db 8942 return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)
8943
8944 - def seconds(self):
8945 db = self.db 8946 return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')
8947
8948 - def epoch(self):
8949 db = self.db 8950 return Expression(db, db._adapter.EPOCH, self, None, 'integer')
8951
8952 - def __getslice__(self, start, stop):
8953 db = self.db 8954 if start < 0: 8955 pos0 = '(%s - %d)' % (self.len(), abs(start) - 1) 8956 else: 8957 pos0 = start + 1 8958 8959 if stop < 0: 8960 length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0) 8961 elif stop == sys.maxint: 8962 length = self.len() 8963 else: 8964 length = '(%s - %s)' % (stop + 1, pos0) 8965 return Expression(db,db._adapter.SUBSTRING, 8966 self, (pos0, length), self.type)
8967
8968 - def __getitem__(self, i):
8969 return self[i:i + 1]
8970
8971 - def __str__(self):
8972 return self.db._adapter.expand(self,self.type)
8973
8974 - def __or__(self, other): # for use in sortby
8975 db = self.db 8976 return Expression(db,db._adapter.COMMA,self,other,self.type)
8977
8978 - def __invert__(self):
8979 db = self.db 8980 if hasattr(self,'_op') and self.op == db._adapter.INVERT: 8981 return self.first 8982 return Expression(db,db._adapter.INVERT,self,type=self.type)
8983
8984 - def __add__(self, other):
8985 db = self.db 8986 return Expression(db,db._adapter.ADD,self,other,self.type)
8987
8988 - def __sub__(self, other):
8989 db = self.db 8990 if self.type in ('integer','bigint'): 8991 result_type = 'integer' 8992 elif self.type in ['date','time','datetime','double','float']: 8993 result_type = 'double' 8994 elif self.type.startswith('decimal('): 8995 result_type = self.type 8996 else: 8997 raise SyntaxError("subtraction operation not supported for type") 8998 return Expression(db,db._adapter.SUB,self,other,result_type)
8999
9000 - def __mul__(self, other):
9001 db = self.db 9002 return Expression(db,db._adapter.MUL,self,other,self.type)
9003
9004 - def __div__(self, other):
9005 db = self.db 9006 return Expression(db,db._adapter.DIV,self,other,self.type)
9007
9008 - def __mod__(self, other):
9009 db = self.db 9010 return Expression(db,db._adapter.MOD,self,other,self.type)
9011
9012 - def __eq__(self, value):
9013 db = self.db 9014 return Query(db, db._adapter.EQ, self, value)
9015
9016 - def __ne__(self, value):
9017 db = self.db 9018 return Query(db, db._adapter.NE, self, value)
9019
9020 - def __lt__(self, value):
9021 db = self.db 9022 return Query(db, db._adapter.LT, self, value)
9023
9024 - def __le__(self, value):
9025 db = self.db 9026 return Query(db, db._adapter.LE, self, value)
9027
9028 - def __gt__(self, value):
9029 db = self.db 9030 return Query(db, db._adapter.GT, self, value)
9031
9032 - def __ge__(self, value):
9033 db = self.db 9034 return Query(db, db._adapter.GE, self, value)
9035
9036 - def like(self, value, case_sensitive=False):
9037 db = self.db 9038 op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE 9039 return Query(db, op, self, value)
9040
9041 - def regexp(self, value):
9042 db = self.db 9043 return Query(db, db._adapter.REGEXP, self, value)
9044
9045 - def belongs(self, *value):
9046 """ 9047 Accepts the following inputs: 9048 field.belongs(1,2) 9049 field.belongs((1,2)) 9050 field.belongs(query) 9051 9052 Does NOT accept: 9053 field.belongs(1) 9054 """ 9055 db = self.db 9056 if len(value) == 1: 9057 value = value[0] 9058 if isinstance(value,Query): 9059 value = db(value)._select(value.first._table._id) 9060 return Query(db, db._adapter.BELONGS, self, value)
9061
9062 - def startswith(self, value):
9063 db = self.db 9064 if not self.type in ('string', 'text', 'json'): 9065 raise SyntaxError("startswith used with incompatible field type") 9066 return Query(db, db._adapter.STARTSWITH, self, value)
9067
9068 - def endswith(self, value):
9069 db = self.db 9070 if not self.type in ('string', 'text', 'json'): 9071 raise SyntaxError("endswith used with incompatible field type") 9072 return Query(db, db._adapter.ENDSWITH, self, value)
9073
9074 - def contains(self, value, all=False, case_sensitive=False):
9075 """ 9076 The case_sensitive parameters is only useful for PostgreSQL 9077 For other RDMBs it is ignored and contains is always case in-sensitive 9078 For MongoDB and GAE contains is always case sensitive 9079 """ 9080 db = self.db 9081 if isinstance(value,(list, tuple)): 9082 subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive) 9083 for v in value if str(v).strip()] 9084 if not subqueries: 9085 return self.contains('') 9086 else: 9087 return reduce(all and AND or OR,subqueries) 9088 if not self.type in ('string', 'text', 'json') and not self.type.startswith('list:'): 9089 raise SyntaxError("contains used with incompatible field type") 9090 return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)
9091
9092 - def with_alias(self, alias):
9093 db = self.db 9094 return Expression(db, db._adapter.AS, self, alias, self.type)
9095 9096 # GIS expressions 9097
9098 - def st_asgeojson(self, precision=15, options=0, version=1):
9099 return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self, 9100 dict(precision=precision, options=options, 9101 version=version), 'string')
9102
9103 - def st_astext(self):
9104 db = self.db 9105 return Expression(db, db._adapter.ST_ASTEXT, self, type='string')
9106
9107 - def st_x(self):
9108 db = self.db 9109 return Expression(db, db._adapter.ST_X, self, type='string')
9110
9111 - def st_y(self):
9112 db = self.db 9113 return Expression(db, db._adapter.ST_Y, self, type='string')
9114
9115 - def st_distance(self, other):
9116 db = self.db 9117 return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')
9118
9119 - def st_simplify(self, value):
9120 db = self.db 9121 return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)
9122 9123 # GIS queries 9124
9125 - def st_contains(self, value):
9126 db = self.db 9127 return Query(db, db._adapter.ST_CONTAINS, self, value)
9128
9129 - def st_equals(self, value):
9130 db = self.db 9131 return Query(db, db._adapter.ST_EQUALS, self, value)
9132
9133 - def st_intersects(self, value):
9134 db = self.db 9135 return Query(db, db._adapter.ST_INTERSECTS, self, value)
9136
9137 - def st_overlaps(self, value):
9138 db = self.db 9139 return Query(db, db._adapter.ST_OVERLAPS, self, value)
9140
9141 - def st_touches(self, value):
9142 db = self.db 9143 return Query(db, db._adapter.ST_TOUCHES, self, value)
9144
9145 - def st_within(self, value):
9146 db = self.db 9147 return Query(db, db._adapter.ST_WITHIN, self, value)
9148
9149 # for use in both Query and sortby 9150 9151 9152 -class SQLCustomType(object):
9153 """ 9154 allows defining of custom SQL types 9155 9156 Example:: 9157 9158 decimal = SQLCustomType( 9159 type ='double', 9160 native ='integer', 9161 encoder =(lambda x: int(float(x) * 100)), 9162 decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) 9163 ) 9164 9165 db.define_table( 9166 'example', 9167 Field('value', type=decimal) 9168 ) 9169 9170 :param type: the web2py type (default = 'string') 9171 :param native: the backend type 9172 :param encoder: how to encode the value to store it in the backend 9173 :param decoder: how to decode the value retrieved from the backend 9174 :param validator: what validators to use ( default = None, will use the 9175 default validator for type) 9176 """ 9177
9178 - def __init__( 9179 self, 9180 type='string', 9181 native=None, 9182 encoder=None, 9183 decoder=None, 9184 validator=None, 9185 _class=None, 9186 ):
9187 9188 self.type = type 9189 self.native = native 9190 self.encoder = encoder or (lambda x: x) 9191 self.decoder = decoder or (lambda x: x) 9192 self.validator = validator 9193 self._class = _class or type
9194
9195 - def startswith(self, text=None):
9196 try: 9197 return self.type.startswith(self, text) 9198 except TypeError: 9199 return False
9200
9201 - def __getslice__(self, a=0, b=100):
9202 return None
9203
9204 - def __getitem__(self, i):
9205 return None
9206
9207 - def __str__(self):
9208 return self._class
9209
9210 -class FieldVirtual(object):
9211 - def __init__(self, name, f=None, ftype='string',label=None,table_name=None):
9212 # for backward compatibility 9213 (self.name, self.f) = (name, f) if f else ('unknown', name) 9214 self.type = ftype 9215 self.label = label or self.name.capitalize().replace('_',' ') 9216 self.represent = lambda v,r:v 9217 self.formatter = IDENTITY 9218 self.comment = None 9219 self.readable = True 9220 self.writable = False 9221 self.requires = None 9222 self.widget = None 9223 self.tablename = table_name 9224 self.filter_out = None
9225 - def __str__(self):
9226 return '%s.%s' % (self.tablename, self.name)
9227
9228 -class FieldMethod(object):
9229 - def __init__(self, name, f=None, handler=None):
9230 # for backward compatibility 9231 (self.name, self.f) = (name, f) if f else ('unknown', name) 9232 self.handler = handler
9233
9234 -def list_represent(x,r=None):
9235 return ', '.join(str(y) for y in x or [])
9236
9237 -class Field(Expression):
9238 9239 Virtual = FieldVirtual 9240 Method = FieldMethod 9241 Lazy = FieldMethod # for backward compatibility 9242 9243 """ 9244 an instance of this class represents a database field 9245 9246 example:: 9247 9248 a = Field(name, 'string', length=32, default=None, required=False, 9249 requires=IS_NOT_EMPTY(), ondelete='CASCADE', 9250 notnull=False, unique=False, 9251 uploadfield=True, widget=None, label=None, comment=None, 9252 uploadfield=True, # True means store on disk, 9253 # 'a_field_name' means store in this field in db 9254 # False means file content will be discarded. 9255 writable=True, readable=True, update=None, authorize=None, 9256 autodelete=False, represent=None, uploadfolder=None, 9257 uploadseparate=False # upload to separate directories by uuid_keys 9258 # first 2 character and tablename.fieldname 9259 # False - old behavior 9260 # True - put uploaded file in 9261 # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2] 9262 # directory) 9263 uploadfs=None # a pyfilesystem where to store upload 9264 9265 to be used as argument of DAL.define_table 9266 9267 allowed field types: 9268 string, boolean, integer, double, text, blob, 9269 date, time, datetime, upload, password 9270 9271 """ 9272
9273 - def __init__( 9274 self, 9275 fieldname, 9276 type='string', 9277 length=None, 9278 default=DEFAULT, 9279 required=False, 9280 requires=DEFAULT, 9281 ondelete='CASCADE', 9282 notnull=False, 9283 unique=False, 9284 uploadfield=True, 9285 widget=None, 9286 label=None, 9287 comment=None, 9288 writable=True, 9289 readable=True, 9290 update=None, 9291 authorize=None, 9292 autodelete=False, 9293 represent=None, 9294 uploadfolder=None, 9295 uploadseparate=False, 9296 uploadfs=None, 9297 compute=None, 9298 custom_store=None, 9299 custom_retrieve=None, 9300 custom_retrieve_file_properties=None, 9301 custom_delete=None, 9302 filter_in = None, 9303 filter_out = None, 9304 custom_qualifier = None, 9305 map_none = None, 9306 ):
9307 self._db = self.db = None # both for backward compatibility 9308 self.op = None 9309 self.first = None 9310 self.second = None 9311 self.name = fieldname = cleanup(fieldname) 9312 if not isinstance(fieldname,str) or hasattr(Table,fieldname) or \ 9313 fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname): 9314 raise SyntaxError('Field: invalid field name: %s' % fieldname) 9315 self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type 9316 self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512) 9317 self.default = default if default!=DEFAULT else (update or None) 9318 self.required = required # is this field required 9319 self.ondelete = ondelete.upper() # this is for reference fields only 9320 self.notnull = notnull 9321 self.unique = unique 9322 self.uploadfield = uploadfield 9323 self.uploadfolder = uploadfolder 9324 self.uploadseparate = uploadseparate 9325 self.uploadfs = uploadfs 9326 self.widget = widget 9327 self.comment = comment 9328 self.writable = writable 9329 self.readable = readable 9330 self.update = update 9331 self.authorize = authorize 9332 self.autodelete = autodelete 9333 self.represent = list_represent if \ 9334 represent==None and type in ('list:integer','list:string') else represent 9335 self.compute = compute 9336 self.isattachment = True 9337 self.custom_store = custom_store 9338 self.custom_retrieve = custom_retrieve 9339 self.custom_retrieve_file_properties = custom_retrieve_file_properties 9340 self.custom_delete = custom_delete 9341 self.filter_in = filter_in 9342 self.filter_out = filter_out 9343 self.custom_qualifier = custom_qualifier 9344 self.label = label if label!=None else fieldname.replace('_',' ').title() 9345 self.requires = requires if requires!=None else [] 9346 self.map_none = map_none
9347
9348 - def set_attributes(self,*args,**attributes):
9349 self.__dict__.update(*args,**attributes)
9350
9351 - def clone(self,point_self_references_to=False,**args):
9352 field = copy.copy(self) 9353 if point_self_references_to and \ 9354 field.type == 'reference %s'+field._tablename: 9355 field.type = 'reference %s' % point_self_references_to 9356 field.__dict__.update(args) 9357 return field
9358
9359 - def store(self, file, filename=None, path=None):
9360 if self.custom_store: 9361 return self.custom_store(file,filename,path) 9362 if isinstance(file, cgi.FieldStorage): 9363 filename = filename or file.filename 9364 file = file.file 9365 elif not filename: 9366 filename = file.name 9367 filename = os.path.basename(filename.replace('/', os.sep)\ 9368 .replace('\\', os.sep)) 9369 m = REGEX_STORE_PATTERN.search(filename) 9370 extension = m and m.group('e') or 'txt' 9371 uuid_key = web2py_uuid().replace('-', '')[-16:] 9372 encoded_filename = base64.b16encode(filename).lower() 9373 newfilename = '%s.%s.%s.%s' % \ 9374 (self._tablename, self.name, uuid_key, encoded_filename) 9375 newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension 9376 self_uploadfield = self.uploadfield 9377 if isinstance(self_uploadfield,Field): 9378 blob_uploadfield_name = self_uploadfield.uploadfield 9379 keys={self_uploadfield.name: newfilename, 9380 blob_uploadfield_name: file.read()} 9381 self_uploadfield.table.insert(**keys) 9382 elif self_uploadfield == True: 9383 if path: 9384 pass 9385 elif self.uploadfolder: 9386 path = self.uploadfolder 9387 elif self.db._adapter.folder: 9388 path = pjoin(self.db._adapter.folder, '..', 'uploads') 9389 else: 9390 raise RuntimeError( 9391 "you must specify a Field(...,uploadfolder=...)") 9392 if self.uploadseparate: 9393 if self.uploadfs: 9394 raise RuntimeError("not supported") 9395 path = pjoin(path,"%s.%s" %(self._tablename, self.name), 9396 uuid_key[:2]) 9397 if not exists(path): 9398 os.makedirs(path) 9399 pathfilename = pjoin(path, newfilename) 9400 if self.uploadfs: 9401 dest_file = self.uploadfs.open(newfilename, 'wb') 9402 else: 9403 dest_file = open(pathfilename, 'wb') 9404 try: 9405 shutil.copyfileobj(file, dest_file) 9406 except IOError: 9407 raise IOError( 9408 'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename) 9409 dest_file.close() 9410 return newfilename
9411
9412 - def retrieve(self, name, path=None, nameonly=False):
9413 """ 9414 if nameonly==True return (filename, fullfilename) instead of 9415 (filename, stream) 9416 """ 9417 self_uploadfield = self.uploadfield 9418 if self.custom_retrieve: 9419 return self.custom_retrieve(name, path) 9420 import http 9421 if self.authorize or isinstance(self_uploadfield, str): 9422 row = self.db(self == name).select().first() 9423 if not row: 9424 raise http.HTTP(404) 9425 if self.authorize and not self.authorize(row): 9426 raise http.HTTP(403) 9427 m = REGEX_UPLOAD_PATTERN.match(name) 9428 if not m or not self.isattachment: 9429 raise TypeError('Can\'t retrieve %s' % name) 9430 file_properties = self.retrieve_file_properties(name,path) 9431 filename = file_properties['filename'] 9432 if isinstance(self_uploadfield, str): # ## if file is in DB 9433 stream = StringIO.StringIO(row[self_uploadfield] or '') 9434 elif isinstance(self_uploadfield,Field): 9435 blob_uploadfield_name = self_uploadfield.uploadfield 9436 query = self_uploadfield == name 9437 data = self_uploadfield.table(query)[blob_uploadfield_name] 9438 stream = StringIO.StringIO(data) 9439 elif self.uploadfs: 9440 # ## if file is on pyfilesystem 9441 stream = self.uploadfs.open(name, 'rb') 9442 else: 9443 # ## if file is on regular filesystem 9444 # this is intentially a sting with filename and not a stream 9445 # this propagates and allows stream_file_or_304_or_206 to be called 9446 fullname = pjoin(file_properties['path'],name) 9447 if nameonly: 9448 return (filename, fullname) 9449 stream = open(fullname,'rb') 9450 return (filename, stream)
9451
9452 - def retrieve_file_properties(self, name, path=None):
9453 self_uploadfield = self.uploadfield 9454 if self.custom_retrieve_file_properties: 9455 return self.custom_retrieve_file_properties(name, path) 9456 try: 9457 m = REGEX_UPLOAD_PATTERN.match(name) 9458 if not m or not self.isattachment: 9459 raise TypeError('Can\'t retrieve %s file properties' % name) 9460 filename = base64.b16decode(m.group('name'), True) 9461 filename = REGEX_CLEANUP_FN.sub('_', filename) 9462 except (TypeError, AttributeError): 9463 filename = name 9464 if isinstance(self_uploadfield, str): # ## if file is in DB 9465 return dict(path=None,filename=filename) 9466 elif isinstance(self_uploadfield,Field): 9467 return dict(path=None,filename=filename) 9468 else: 9469 # ## if file is on filesystem 9470 if path: 9471 pass 9472 elif self.uploadfolder: 9473 path = self.uploadfolder 9474 else: 9475 path = pjoin(self.db._adapter.folder, '..', 'uploads') 9476 if self.uploadseparate: 9477 t = m.group('table') 9478 f = m.group('field') 9479 u = m.group('uuidkey') 9480 path = pjoin(path,"%s.%s" % (t,f),u[:2]) 9481 return dict(path=path,filename=filename)
9482 9483
9484 - def formatter(self, value):
9485 requires = self.requires 9486 if value is None or not requires: 9487 return value or self.map_none 9488 if not isinstance(requires, (list, tuple)): 9489 requires = [requires] 9490 elif isinstance(requires, tuple): 9491 requires = list(requires) 9492 else: 9493 requires = copy.copy(requires) 9494 requires.reverse() 9495 for item in requires: 9496 if hasattr(item, 'formatter'): 9497 value = item.formatter(value) 9498 return value
9499
9500 - def validate(self, value):
9501 if not self.requires or self.requires == DEFAULT: 9502 return ((value if value!=self.map_none else None), None) 9503 requires = self.requires 9504 if not isinstance(requires, (list, tuple)): 9505 requires = [requires] 9506 for validator in requires: 9507 (value, error) = validator(value) 9508 if error: 9509 return (value, error) 9510 return ((value if value!=self.map_none else None), None)
9511
9512 - def count(self, distinct=None):
9513 return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
9514
9515 - def as_dict(self, flat=False, sanitize=True, options=True):
9516 9517 attrs = ('type', 'length', 'default', 'required', 9518 'ondelete', 'notnull', 'unique', 'uploadfield', 9519 'widget', 'label', 'comment', 'writable', 'readable', 9520 'update', 'authorize', 'autodelete', 'represent', 9521 'uploadfolder', 'uploadseparate', 'uploadfs', 9522 'compute', 'custom_store', 'custom_retrieve', 9523 'custom_retrieve_file_properties', 'custom_delete', 9524 'filter_in', 'filter_out', 'custom_qualifier', 9525 'map_none', 'name') 9526 9527 SERIALIZABLE_TYPES = (int, long, basestring, dict, list, 9528 float, tuple, bool, type(None)) 9529 9530 def flatten(obj): 9531 if flat: 9532 if isinstance(obj, flatten.__class__): 9533 return str(type(obj)) 9534 elif isinstance(obj, type): 9535 try: 9536 return str(obj).split("'")[1] 9537 except IndexError: 9538 return str(obj) 9539 elif not isinstance(obj, SERIALIZABLE_TYPES): 9540 return str(obj) 9541 elif isinstance(obj, dict): 9542 newobj = dict() 9543 for k, v in obj.items(): 9544 newobj[k] = flatten(v) 9545 return newobj 9546 elif isinstance(obj, (list, tuple, set)): 9547 return [flatten(v) for v in obj] 9548 else: 9549 return obj 9550 elif isinstance(obj, (dict, set)): 9551 return obj.copy() 9552 else: return obj
9553 9554 def filter_requires(t, r, options=True): 9555 if sanitize and any([keyword in str(t).upper() for 9556 keyword in ("CRYPT", "IS_STRONG")]): 9557 return None 9558 9559 if not isinstance(r, dict): 9560 if options and hasattr(r, "options"): 9561 if callable(r.options): 9562 r.options() 9563 newr = r.__dict__.copy() 9564 else: 9565 newr = r.copy() 9566 9567 # remove options if not required 9568 if not options and newr.has_key("labels"): 9569 [newr.update({key:None}) for key in 9570 ("labels", "theset") if (key in newr)] 9571 9572 for k, v in newr.items(): 9573 if k == "other": 9574 if isinstance(v, dict): 9575 otype, other = v.popitem() 9576 else: 9577 otype = flatten(type(v)) 9578 other = v 9579 newr[k] = {otype: filter_requires(otype, other, 9580 options=options)} 9581 else: 9582 newr[k] = flatten(v) 9583 return newr
9584 9585 if isinstance(self.requires, (tuple, list, set)): 9586 requires = dict([(flatten(type(r)), 9587 filter_requires(type(r), r, 9588 options=options)) for 9589 r in self.requires]) 9590 else: 9591 requires = {flatten(type(self.requires)): 9592 filter_requires(type(self.requires), 9593 self.requires, options=options)} 9594 9595 d = dict(colname="%s.%s" % (self.tablename, self.name), 9596 requires=requires) 9597 d.update([(attr, flatten(getattr(self, attr))) for attr in attrs]) 9598 return d 9599
9600 - def as_xml(self, sanitize=True, options=True):
9601 if have_serializers: 9602 xml = serializers.xml 9603 else: 9604 raise ImportError("No xml serializers available") 9605 d = self.as_dict(flat=True, sanitize=sanitize, 9606 options=options) 9607 return xml(d)
9608
9609 - def as_json(self, sanitize=True, options=True):
9610 if have_serializers: 9611 json = serializers.json 9612 else: 9613 raise ImportError("No json serializers available") 9614 d = self.as_dict(flat=True, sanitize=sanitize, 9615 options=options) 9616 return json(d)
9617
9618 - def as_yaml(self, sanitize=True, options=True):
9619 if have_serializers: 9620 d = self.as_dict(flat=True, sanitize=sanitize, 9621 options=options) 9622 return serializers.yaml(d) 9623 else: 9624 raise ImportError("No YAML serializers available")
9625
9626 - def __nonzero__(self):
9627 return True
9628
9629 - def __str__(self):
9630 try: 9631 return '%s.%s' % (self.tablename, self.name) 9632 except: 9633 return '<no table>.%s' % self.name
9634
9635 9636 -class Query(object):
9637 9638 """ 9639 a query object necessary to define a set. 9640 it can be stored or can be passed to DAL.__call__() to obtain a Set 9641 9642 Example:: 9643 9644 query = db.users.name=='Max' 9645 set = db(query) 9646 records = set.select() 9647 9648 """ 9649
9650 - def __init__( 9651 self, 9652 db, 9653 op, 9654 first=None, 9655 second=None, 9656 ignore_common_filters = False, 9657 **optional_args 9658 ):
9659 self.db = self._db = db 9660 self.op = op 9661 self.first = first 9662 self.second = second 9663 self.ignore_common_filters = ignore_common_filters 9664 self.optional_args = optional_args
9665
9666 - def __repr__(self):
9667 return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)
9668
9669 - def __str__(self):
9670 return self.db._adapter.expand(self)
9671
9672 - def __and__(self, other):
9673 return Query(self.db,self.db._adapter.AND,self,other)
9674 9675 __rand__ = __and__ 9676
9677 - def __or__(self, other):
9678 return Query(self.db,self.db._adapter.OR,self,other)
9679 9680 __ror__ = __or__ 9681
9682 - def __invert__(self):
9683 if self.op==self.db._adapter.NOT: 9684 return self.first 9685 return Query(self.db,self.db._adapter.NOT,self)
9686
9687 - def __eq__(self, other):
9688 return repr(self) == repr(other)
9689
9690 - def __ne__(self, other):
9691 return not (self == other)
9692
9693 - def case(self,t=1,f=0):
9694 return self.db._adapter.CASE(self,t,f)
9695
9696 - def as_dict(self, flat=False, sanitize=True):
9697 """Experimental stuff 9698 9699 This allows to return a plain dictionary with the basic 9700 query representation. Can be used with json/xml services 9701 for client-side db I/O 9702 9703 Example: 9704 >>> q = db.auth_user.id != 0 9705 >>> q.as_dict(flat=True) 9706 {"op": "NE", "first":{"tablename": "auth_user", 9707 "fieldname": "id"}, 9708 "second":0} 9709 """ 9710 9711 SERIALIZABLE_TYPES = (tuple, dict, list, int, long, float, 9712 basestring, type(None), bool) 9713 def loop(d): 9714 newd = dict() 9715 for k, v in d.items(): 9716 if k in ("first", "second"): 9717 if isinstance(v, self.__class__): 9718 newd[k] = loop(v.__dict__) 9719 elif isinstance(v, Field): 9720 newd[k] = {"tablename": v._tablename, 9721 "fieldname": v.name} 9722 elif isinstance(v, Expression): 9723 newd[k] = loop(v.__dict__) 9724 elif isinstance(v, SERIALIZABLE_TYPES): 9725 newd[k] = v 9726 elif isinstance(v, (datetime.date, 9727 datetime.time, 9728 datetime.datetime)): 9729 newd[k] = unicode(v) 9730 elif k == "op": 9731 if callable(v): 9732 newd[k] = v.__name__ 9733 elif isinstance(v, basestring): 9734 newd[k] = v 9735 else: pass # not callable or string 9736 elif isinstance(v, SERIALIZABLE_TYPES): 9737 if isinstance(v, dict): 9738 newd[k] = loop(v) 9739 else: newd[k] = v 9740 return newd
9741 9742 if flat: 9743 return loop(self.__dict__) 9744 else: return self.__dict__
9745 9746
9747 - def as_xml(self, sanitize=True):
9748 if have_serializers: 9749 xml = serializers.xml 9750 else: 9751 raise ImportError("No xml serializers available") 9752 d = self.as_dict(flat=True, sanitize=sanitize) 9753 return xml(d)
9754
9755 - def as_json(self, sanitize=True):
9756 if have_serializers: 9757 json = serializers.json 9758 else: 9759 raise ImportError("No json serializers available") 9760 d = self.as_dict(flat=True, sanitize=sanitize) 9761 return json(d)
9762
9763 -def xorify(orderby):
9764 if not orderby: 9765 return None 9766 orderby2 = orderby[0] 9767 for item in orderby[1:]: 9768 orderby2 = orderby2 | item 9769 return orderby2
9770
9771 -def use_common_filters(query):
9772 return (query and hasattr(query,'ignore_common_filters') and \ 9773 not query.ignore_common_filters)
9774
9775 -class Set(object):
9776 9777 """ 9778 a Set represents a set of records in the database, 9779 the records are identified by the query=Query(...) object. 9780 normally the Set is generated by DAL.__call__(Query(...)) 9781 9782 given a set, for example 9783 set = db(db.users.name=='Max') 9784 you can: 9785 set.update(db.users.name='Massimo') 9786 set.delete() # all elements in the set 9787 set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10)) 9788 and take subsets: 9789 subset = set(db.users.id<5) 9790 """ 9791
9792 - def __init__(self, db, query, ignore_common_filters = None):
9793 self.db = db 9794 self._db = db # for backward compatibility 9795 self.dquery = None 9796 9797 # if query is a dict, parse it 9798 if isinstance(query, dict): 9799 query = self.parse(query) 9800 9801 if not ignore_common_filters is None and \ 9802 use_common_filters(query) == ignore_common_filters: 9803 query = copy.copy(query) 9804 query.ignore_common_filters = ignore_common_filters 9805 self.query = query
9806
9807 - def __repr__(self):
9808 return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)
9809
9810 - def __call__(self, query, ignore_common_filters=False):
9811 if query is None: 9812 return self 9813 elif isinstance(query,Table): 9814 query = self.db._adapter.id_query(query) 9815 elif isinstance(query,str): 9816 query = Expression(self.db,query) 9817 elif isinstance(query,Field): 9818 query = query!=None 9819 if self.query: 9820 return Set(self.db, self.query & query, 9821 ignore_common_filters=ignore_common_filters) 9822 else: 9823 return Set(self.db, query, 9824 ignore_common_filters=ignore_common_filters)
9825
9826 - def _count(self,distinct=None):
9827 return self.db._adapter._count(self.query,distinct)
9828
9829 - def _select(self, *fields, **attributes):
9830 adapter = self.db._adapter 9831 tablenames = adapter.tables(self.query, 9832 attributes.get('join',None), 9833 attributes.get('left',None), 9834 attributes.get('orderby',None), 9835 attributes.get('groupby',None)) 9836 fields = adapter.expand_all(fields, tablenames) 9837 return adapter._select(self.query,fields,attributes)
9838
9839 - def _delete(self):
9840 db = self.db 9841 tablename = db._adapter.get_table(self.query) 9842 return db._adapter._delete(tablename,self.query)
9843
9844 - def _update(self, **update_fields):
9845 db = self.db 9846 tablename = db._adapter.get_table(self.query) 9847 fields = db[tablename]._listify(update_fields,update=True) 9848 return db._adapter._update(tablename,self.query,fields)
9849
9850 - def as_dict(self, flat=False, sanitize=True):
9851 if flat: 9852 uid = dbname = uri = None 9853 codec = self.db._db_codec 9854 if not sanitize: 9855 uri, dbname, uid = (self.db._dbname, str(self.db), 9856 self.db._db_uid) 9857 d = {"query": self.query.as_dict(flat=flat)} 9858 d["db"] = {"uid": uid, "codec": codec, 9859 "name": dbname, "uri": uri} 9860 return d 9861 else: return self.__dict__
9862
9863 - def as_xml(self, sanitize=True):
9864 if have_serializers: 9865 xml = serializers.xml 9866 else: 9867 raise ImportError("No xml serializers available") 9868 d = self.as_dict(flat=True, sanitize=sanitize) 9869 return xml(d)
9870
9871 - def as_json(self, sanitize=True):
9872 if have_serializers: 9873 json = serializers.json 9874 else: 9875 raise ImportError("No json serializers available") 9876 d = self.as_dict(flat=True, sanitize=sanitize) 9877 return json(d)
9878
9879 - def parse(self, dquery):
9880 "Experimental: Turn a dictionary into a Query object" 9881 self.dquery = dquery 9882 return self.build(self.dquery)
9883
9884 - def build(self, d):
9885 "Experimental: see .parse()" 9886 op, first, second = (d["op"], d["first"], 9887 d.get("second", None)) 9888 left = right = built = None 9889 9890 if op in ("AND", "OR"): 9891 if not (type(first), type(second)) == (dict, dict): 9892 raise SyntaxError("Invalid AND/OR query") 9893 if op == "AND": 9894 built = self.build(first) & self.build(second) 9895 else: built = self.build(first) | self.build(second) 9896 9897 elif op == "NOT": 9898 if first is None: 9899 raise SyntaxError("Invalid NOT query") 9900 built = ~self.build(first) 9901 else: 9902 # normal operation (GT, EQ, LT, ...) 9903 for k, v in {"left": first, "right": second}.items(): 9904 if isinstance(v, dict) and v.get("op"): 9905 v = self.build(v) 9906 if isinstance(v, dict) and ("tablename" in v): 9907 v = self.db[v["tablename"]][v["fieldname"]] 9908 if k == "left": left = v 9909 else: right = v 9910 9911 if hasattr(self.db._adapter, op): 9912 opm = getattr(self.db._adapter, op) 9913 9914 if op == "EQ": built = left == right 9915 elif op == "NE": built = left != right 9916 elif op == "GT": built = left > right 9917 elif op == "GE": built = left >= right 9918 elif op == "LT": built = left < right 9919 elif op == "LE": built = left <= right 9920 elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"): 9921 built = Expression(self.db, opm) 9922 elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY", 9923 "COALESCE_ZERO", "RAW", "INVERT"): 9924 built = Expression(self.db, opm, left) 9925 elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING", 9926 "REGEXP", "LIKE", "ILIKE", "STARTSWITH", 9927 "ENDSWITH", "ADD", "SUB", "MUL", "DIV", 9928 "MOD", "AS", "ON", "COMMA", "NOT_NULL", 9929 "COALESCE", "CONTAINS", "BELONGS"): 9930 built = Expression(self.db, opm, left, right) 9931 # expression as string 9932 elif not (left or right): built = Expression(self.db, op) 9933 else: 9934 raise SyntaxError("Operator not supported: %s" % op) 9935 9936 return built
9937
9938 - def isempty(self):
9939 return not self.select(limitby=(0,1), orderby_on_limitby=False)
9940
9941 - def count(self,distinct=None, cache=None):
9942 db = self.db 9943 if cache: 9944 cache_model, time_expire = cache 9945 sql = self._count(distinct=distinct) 9946 key = db._uri + '/' + sql 9947 if len(key)>200: key = hashlib_md5(key).hexdigest() 9948 return cache_model( 9949 key, 9950 (lambda self=self,distinct=distinct: \ 9951 db._adapter.count(self.query,distinct)), 9952 time_expire) 9953 return db._adapter.count(self.query,distinct)
9954
9955 - def select(self, *fields, **attributes):
9956 adapter = self.db._adapter 9957 tablenames = adapter.tables(self.query, 9958 attributes.get('join',None), 9959 attributes.get('left',None), 9960 attributes.get('orderby',None), 9961 attributes.get('groupby',None)) 9962 fields = adapter.expand_all(fields, tablenames) 9963 return adapter.select(self.query,fields,attributes)
9964
9965 - def nested_select(self,*fields,**attributes):
9966 return Expression(self.db,self._select(*fields,**attributes))
9967
9968 - def delete(self):
9969 db = self.db 9970 tablename = db._adapter.get_table(self.query) 9971 table = db[tablename] 9972 if any(f(self) for f in table._before_delete): return 0 9973 ret = db._adapter.delete(tablename,self.query) 9974 ret and [f(self) for f in table._after_delete] 9975 return ret
9976
9977 - def update(self, **update_fields):
9978 db = self.db 9979 tablename = db._adapter.get_table(self.query) 9980 table = db[tablename] 9981 table._attempt_upload(update_fields) 9982 if any(f(self,update_fields) for f in table._before_update): 9983 return 0 9984 fields = table._listify(update_fields,update=True) 9985 if not fields: 9986 raise SyntaxError("No fields to update") 9987 ret = db._adapter.update("%s" % table,self.query,fields) 9988 ret and [f(self,update_fields) for f in table._after_update] 9989 return ret
9990
9991 - def update_naive(self, **update_fields):
9992 """ 9993 same as update but does not call table._before_update and _after_update 9994 """ 9995 tablename = self.db._adapter.get_table(self.query) 9996 table = self.db[tablename] 9997 fields = table._listify(update_fields,update=True) 9998 if not fields: raise SyntaxError("No fields to update") 9999 10000 ret = self.db._adapter.update("%s" % table,self.query,fields) 10001 return ret
10002
10003 - def validate_and_update(self, **update_fields):
10004 tablename = self.db._adapter.get_table(self.query) 10005 response = Row() 10006 response.errors = Row() 10007 new_fields = copy.copy(update_fields) 10008 for key,value in update_fields.iteritems(): 10009 value,error = self.db[tablename][key].validate(value) 10010 if error: 10011 response.errors[key] = error 10012 else: 10013 new_fields[key] = value 10014 table = self.db[tablename] 10015 if response.errors: 10016 response.updated = None 10017 else: 10018 if not any(f(self,new_fields) for f in table._before_update): 10019 fields = table._listify(new_fields,update=True) 10020 if not fields: raise SyntaxError("No fields to update") 10021 ret = self.db._adapter.update(tablename,self.query,fields) 10022 ret and [f(self,new_fields) for f in table._after_update] 10023 else: 10024 ret = 0 10025 response.updated = ret 10026 return response
10027
10028 - def delete_uploaded_files(self, upload_fields=None):
10029 table = self.db[self.db._adapter.tables(self.query)[0]] 10030 # ## mind uploadfield==True means file is not in DB 10031 if upload_fields: 10032 fields = upload_fields.keys() 10033 else: 10034 fields = table.fields 10035 fields = [f for f in fields if table[f].type == 'upload' 10036 and table[f].uploadfield == True 10037 and table[f].autodelete] 10038 if not fields: 10039 return False 10040 for record in self.select(*[table[f] for f in fields]): 10041 for fieldname in fields: 10042 field = table[fieldname] 10043 oldname = record.get(fieldname, None) 10044 if not oldname: 10045 continue 10046 if upload_fields and oldname == upload_fields[fieldname]: 10047 continue 10048 if field.custom_delete: 10049 field.custom_delete(oldname) 10050 else: 10051 uploadfolder = field.uploadfolder 10052 if not uploadfolder: 10053 uploadfolder = pjoin( 10054 self.db._adapter.folder, '..', 'uploads') 10055 if field.uploadseparate: 10056 items = oldname.split('.') 10057 uploadfolder = pjoin( 10058 uploadfolder, 10059 "%s.%s" % (items[0], items[1]), 10060 items[2][:2]) 10061 oldpath = pjoin(uploadfolder, oldname) 10062 if exists(oldpath): 10063 os.unlink(oldpath) 10064 return False
10065
10066 -class RecordUpdater(object):
10067 - def __init__(self, colset, table, id):
10068 self.colset, self.db, self.tablename, self.id = \ 10069 colset, table._db, table._tablename, id
10070
10071 - def __call__(self, **fields):
10072 colset, db, tablename, id = self.colset, self.db, self.tablename, self.id 10073 table = db[tablename] 10074 newfields = fields or dict(colset) 10075 for fieldname in newfields.keys(): 10076 if not fieldname in table.fields or table[fieldname].type=='id': 10077 del newfields[fieldname] 10078 table._db(table._id==id,ignore_common_filters=True).update(**newfields) 10079 colset.update(newfields) 10080 return colset
10081
10082 -class RecordDeleter(object):
10083 - def __init__(self, table, id):
10084 self.db, self.tablename, self.id = table._db, table._tablename, id
10085 - def __call__(self):
10086 return self.db(self.db[self.tablename]._id==self.id).delete()
10087
10088 -class LazySet(object):
10089 - def __init__(self, field, id):
10090 self.db, self.tablename, self.fieldname, self.id = \ 10091 field.db, field._tablename, field.name, id
10092 - def _getset(self):
10093 query = self.db[self.tablename][self.fieldname]==self.id 10094 return Set(self.db,query)
10095 - def __repr__(self):
10096 return repr(self._getset())
10097 - def __call__(self, query, ignore_common_filters=False):
10098 return self._getset()(query, ignore_common_filters)
10099 - def _count(self,distinct=None):
10100 return self._getset()._count(distinct)
10101 - def _select(self, *fields, **attributes):
10102 return self._getset()._select(*fields,**attributes)
10103 - def _delete(self):
10104 return self._getset()._delete()
10105 - def _update(self, **update_fields):
10106 return self._getset()._update(**update_fields)
10107 - def isempty(self):
10108 return self._getset().isempty()
10109 - def count(self,distinct=None, cache=None):
10110 return self._getset().count(distinct,cache)
10111 - def select(self, *fields, **attributes):
10112 return self._getset().select(*fields,**attributes)
10113 - def nested_select(self,*fields,**attributes):
10114 return self._getset().nested_select(*fields,**attributes)
10115 - def delete(self):
10116 return self._getset().delete()
10117 - def update(self, **update_fields):
10118 return self._getset().update(**update_fields)
10119 - def update_naive(self, **update_fields):
10120 return self._getset().update_naive(**update_fields)
10121 - def validate_and_update(self, **update_fields):
10122 return self._getset().validate_and_update(**update_fields)
10123 - def delete_uploaded_files(self, upload_fields=None):
10124 return self._getset().delete_uploaded_files(upload_fields)
10125
10126 -class VirtualCommand(object):
10127 - def __init__(self,method,row):
10128 self.method=method 10129 self.row=row
10130 - def __call__(self,*args,**kwargs):
10131 return self.method(self.row,*args,**kwargs)
10132
10133 -def lazy_virtualfield(f):
10134 f.__lazy__ = True 10135 return f
10136
10137 -class Rows(object):
10138 10139 """ 10140 A wrapper for the return value of a select. It basically represents a table. 10141 It has an iterator and each row is represented as a dictionary. 10142 """ 10143 10144 # ## TODO: this class still needs some work to care for ID/OID 10145
10146 - def __init__( 10147 self, 10148 db=None, 10149 records=[], 10150 colnames=[], 10151 compact=True, 10152 rawrows=None 10153 ):
10154 self.db = db 10155 self.records = records 10156 self.colnames = colnames 10157 self.compact = compact 10158 self.response = rawrows
10159
10160 - def __repr__(self):
10161 return '<Rows (%s)>' % len(self.records)
10162
10163 - def setvirtualfields(self,**keyed_virtualfields):
10164 """ 10165 db.define_table('x',Field('number','integer')) 10166 if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)] 10167 10168 from gluon.dal import lazy_virtualfield 10169 10170 class MyVirtualFields(object): 10171 # normal virtual field (backward compatible, discouraged) 10172 def normal_shift(self): return self.x.number+1 10173 # lazy virtual field (because of @staticmethod) 10174 @lazy_virtualfield 10175 def lazy_shift(instance,row,delta=4): return row.x.number+delta 10176 db.x.virtualfields.append(MyVirtualFields()) 10177 10178 for row in db(db.x).select(): 10179 print row.number, row.normal_shift, row.lazy_shift(delta=7) 10180 """ 10181 if not keyed_virtualfields: 10182 return self 10183 for row in self.records: 10184 for (tablename,virtualfields) in keyed_virtualfields.iteritems(): 10185 attributes = dir(virtualfields) 10186 if not tablename in row: 10187 box = row[tablename] = Row() 10188 else: 10189 box = row[tablename] 10190 updated = False 10191 for attribute in attributes: 10192 if attribute[0] != '_': 10193 method = getattr(virtualfields,attribute) 10194 if hasattr(method,'__lazy__'): 10195 box[attribute]=VirtualCommand(method,row) 10196 elif type(method)==types.MethodType: 10197 if not updated: 10198 virtualfields.__dict__.update(row) 10199 updated = True 10200 box[attribute]=method() 10201 return self
10202
10203 - def __and__(self,other):
10204 if self.colnames!=other.colnames: 10205 raise Exception('Cannot & incompatible Rows objects') 10206 records = self.records+other.records 10207 return Rows(self.db,records,self.colnames)
10208
10209 - def __or__(self,other):
10210 if self.colnames!=other.colnames: 10211 raise Exception('Cannot | incompatible Rows objects') 10212 records = self.records 10213 records += [record for record in other.records \ 10214 if not record in records] 10215 return Rows(self.db,records,self.colnames)
10216
10217 - def __nonzero__(self):
10218 if len(self.records): 10219 return 1 10220 return 0
10221
10222 - def __len__(self):
10223 return len(self.records)
10224
10225 - def __getslice__(self, a, b):
10226 return Rows(self.db,self.records[a:b],self.colnames,compact=self.compact)
10227
10228 - def __getitem__(self, i):
10229 row = self.records[i] 10230 keys = row.keys() 10231 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10232 return row[row.keys()[0]] 10233 return row
10234
10235 - def __iter__(self):
10236 """ 10237 iterator over records 10238 """ 10239 10240 for i in xrange(len(self)): 10241 yield self[i]
10242
10243 - def __str__(self):
10244 """ 10245 serializes the table into a csv file 10246 """ 10247 10248 s = StringIO.StringIO() 10249 self.export_to_csv_file(s) 10250 return s.getvalue()
10251
10252 - def first(self):
10253 if not self.records: 10254 return None 10255 return self[0]
10256
10257 - def last(self):
10258 if not self.records: 10259 return None 10260 return self[-1]
10261
10262 - def find(self,f,limitby=None):
10263 """ 10264 returns a new Rows object, a subset of the original object, 10265 filtered by the function f 10266 """ 10267 if not self: 10268 return Rows(self.db, [], self.colnames) 10269 records = [] 10270 if limitby: 10271 a,b = limitby 10272 else: 10273 a,b = 0,len(self) 10274 k = 0 10275 for row in self: 10276 if f(row): 10277 if a<=k: records.append(row) 10278 k += 1 10279 if k==b: break 10280 return Rows(self.db, records, self.colnames)
10281
10282 - def exclude(self, f):
10283 """ 10284 removes elements from the calling Rows object, filtered by the function f, 10285 and returns a new Rows object containing the removed elements 10286 """ 10287 if not self.records: 10288 return Rows(self.db, [], self.colnames) 10289 removed = [] 10290 i=0 10291 while i<len(self): 10292 row = self[i] 10293 if f(row): 10294 removed.append(self.records[i]) 10295 del self.records[i] 10296 else: 10297 i += 1 10298 return Rows(self.db, removed, self.colnames)
10299
10300 - def sort(self, f, reverse=False):
10301 """ 10302 returns a list of sorted elements (not sorted in place) 10303 """ 10304 rows = Rows(self.db,[],self.colnames,compact=False) 10305 rows.records = sorted(self,key=f,reverse=reverse) 10306 return rows
10307 10308
10309 - def group_by_value(self, field):
10310 """ 10311 regroups the rows, by one of the fields 10312 """ 10313 if not self.records: 10314 return {} 10315 key = str(field) 10316 grouped_row_group = dict() 10317 10318 for row in self: 10319 value = row[key] 10320 if not value in grouped_row_group: 10321 grouped_row_group[value] = [row] 10322 else: 10323 grouped_row_group[value].append(row) 10324 return grouped_row_group
10325
10326 - def render(self, i=None, fields=None):
10327 """ 10328 Takes an index and returns a copy of the indexed row with values 10329 transformed via the "represent" attributes of the associated fields. 10330 10331 If no index is specified, a generator is returned for iteration 10332 over all the rows. 10333 10334 fields -- a list of fields to transform (if None, all fields with 10335 "represent" attributes will be transformed). 10336 """ 10337 10338 10339 if i is None: 10340 return (self.repr(i, fields=fields) for i in range(len(self))) 10341 import sqlhtml 10342 row = copy.deepcopy(self.records[i]) 10343 keys = row.keys() 10344 tables = [f.tablename for f in fields] if fields \ 10345 else [k for k in keys if k != '_extra'] 10346 for table in tables: 10347 repr_fields = [f.name for f in fields if f.tablename == table] \ 10348 if fields else [k for k in row[table].keys() 10349 if (hasattr(self.db[table], k) and 10350 isinstance(self.db[table][k], Field) 10351 and self.db[table][k].represent)] 10352 for field in repr_fields: 10353 row[table][field] = sqlhtml.represent( 10354 self.db[table][field], row[table][field], row[table]) 10355 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10356 return row[keys[0]] 10357 return row
10358
10359 - def as_list(self, 10360 compact=True, 10361 storage_to_dict=True, 10362 datetime_to_str=False, 10363 custom_types=None):
10364 """ 10365 returns the data as a list or dictionary. 10366 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10367 :param datetime_to_str: convert datetime fields as strings (default False) 10368 """ 10369 (oc, self.compact) = (self.compact, compact) 10370 if storage_to_dict: 10371 items = [item.as_dict(datetime_to_str, custom_types) for item in self] 10372 else: 10373 items = [item for item in self] 10374 self.compact = compact 10375 return items
10376 10377
10378 - def as_dict(self, 10379 key='id', 10380 compact=True, 10381 storage_to_dict=True, 10382 datetime_to_str=False, 10383 custom_types=None):
10384 """ 10385 returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) 10386 10387 :param key: the name of the field to be used as dict key, normally the id 10388 :param compact: ? (default True) 10389 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10390 :param datetime_to_str: convert datetime fields as strings (default False) 10391 """ 10392 10393 # test for multiple rows 10394 multi = False 10395 f = self.first() 10396 if f and isinstance(key, basestring): 10397 multi = any([isinstance(v, f.__class__) for v in f.values()]) 10398 if (not "." in key) and multi: 10399 # No key provided, default to int indices 10400 def new_key(): 10401 i = 0 10402 while True: 10403 yield i 10404 i += 1
10405 key_generator = new_key() 10406 key = lambda r: key_generator.next() 10407 10408 rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types) 10409 if isinstance(key,str) and key.count('.')==1: 10410 (table, field) = key.split('.') 10411 return dict([(r[table][field],r) for r in rows]) 10412 elif isinstance(key,str): 10413 return dict([(r[key],r) for r in rows]) 10414 else: 10415 return dict([(key(r),r) for r in rows])
10416
10417 - def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
10418 """ 10419 export data to csv, the first line contains the column names 10420 10421 :param ofile: where the csv must be exported to 10422 :param null: how null values must be represented (default '<NULL>') 10423 :param delimiter: delimiter to separate values (default ',') 10424 :param quotechar: character to use to quote string values (default '"') 10425 :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL) 10426 :param represent: use the fields .represent value (default False) 10427 :param colnames: list of column names to use (default self.colnames) 10428 This will only work when exporting rows objects!!!! 10429 DO NOT use this with db.export_to_csv() 10430 """ 10431 delimiter = kwargs.get('delimiter', ',') 10432 quotechar = kwargs.get('quotechar', '"') 10433 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 10434 represent = kwargs.get('represent', False) 10435 writer = csv.writer(ofile, delimiter=delimiter, 10436 quotechar=quotechar, quoting=quoting) 10437 colnames = kwargs.get('colnames', self.colnames) 10438 write_colnames = kwargs.get('write_colnames',True) 10439 # a proper csv starting with the column names 10440 if write_colnames: 10441 writer.writerow(colnames) 10442 10443 def none_exception(value): 10444 """ 10445 returns a cleaned up value that can be used for csv export: 10446 - unicode text is encoded as such 10447 - None values are replaced with the given representation (default <NULL>) 10448 """ 10449 if value is None: 10450 return null 10451 elif isinstance(value, unicode): 10452 return value.encode('utf8') 10453 elif isinstance(value,Reference): 10454 return long(value) 10455 elif hasattr(value, 'isoformat'): 10456 return value.isoformat()[:19].replace('T', ' ') 10457 elif isinstance(value, (list,tuple)): # for type='list:..' 10458 return bar_encode(value) 10459 return value
10460 10461 for record in self: 10462 row = [] 10463 for col in colnames: 10464 if not REGEX_TABLE_DOT_FIELD.match(col): 10465 row.append(record._extra[col]) 10466 else: 10467 (t, f) = col.split('.') 10468 field = self.db[t][f] 10469 if isinstance(record.get(t, None), (Row,dict)): 10470 value = record[t][f] 10471 else: 10472 value = record[f] 10473 if field.type=='blob' and not value is None: 10474 value = base64.b64encode(value) 10475 elif represent and field.represent: 10476 value = field.represent(value) 10477 row.append(none_exception(value)) 10478 writer.writerow(row) 10479
10480 - def xml(self,strict=False,row_name='row',rows_name='rows'):
10481 """ 10482 serializes the table using sqlhtml.SQLTABLE (if present) 10483 """ 10484 10485 if strict: 10486 ncols = len(self.colnames) 10487 return '<%s>\n%s\n</%s>' % (rows_name, 10488 '\n'.join(row.as_xml(row_name=row_name, 10489 colnames=self.colnames) for 10490 row in self), rows_name) 10491 10492 import sqlhtml 10493 return sqlhtml.SQLTABLE(self).xml()
10494
10495 - def as_xml(self,row_name='row',rows_name='rows'):
10496 return self.xml(strict=True, row_name=row_name, rows_name=rows_name)
10497
10498 - def as_json(self, mode='object', default=None):
10499 """ 10500 serializes the rows to a JSON list or object with objects 10501 mode='object' is not implemented (should return a nested 10502 object structure) 10503 """ 10504 10505 items = [record.as_json(mode=mode, default=default, 10506 serialize=False, 10507 colnames=self.colnames) for 10508 record in self] 10509 10510 if have_serializers: 10511 return serializers.json(items, 10512 default=default or 10513 serializers.custom_json) 10514 elif simplejson: 10515 return simplejson.dumps(items) 10516 else: 10517 raise RuntimeError("missing simplejson")
10518 10519 # for consistent naming yet backwards compatible 10520 as_csv = __str__ 10521 json = as_json 10522
10523 10524 ################################################################################ 10525 # dummy function used to define some doctests 10526 ################################################################################ 10527 10528 -def test_all():
10529 """ 10530 10531 >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\") 10532 >>> if len(sys.argv)>1: db = DAL(sys.argv[1]) 10533 >>> tmp = db.define_table('users',\ 10534 Field('stringf', 'string', length=32, required=True),\ 10535 Field('booleanf', 'boolean', default=False),\ 10536 Field('passwordf', 'password', notnull=True),\ 10537 Field('uploadf', 'upload'),\ 10538 Field('blobf', 'blob'),\ 10539 Field('integerf', 'integer', unique=True),\ 10540 Field('doublef', 'double', unique=True,notnull=True),\ 10541 Field('jsonf', 'json'),\ 10542 Field('datef', 'date', default=datetime.date.today()),\ 10543 Field('timef', 'time'),\ 10544 Field('datetimef', 'datetime'),\ 10545 migrate='test_user.table') 10546 10547 Insert a field 10548 10549 >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\ 10550 uploadf=None, integerf=5, doublef=3.14,\ 10551 jsonf={"j": True},\ 10552 datef=datetime.date(2001, 1, 1),\ 10553 timef=datetime.time(12, 30, 15),\ 10554 datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15)) 10555 1 10556 10557 Drop the table 10558 10559 >>> db.users.drop() 10560 10561 Examples of insert, select, update, delete 10562 10563 >>> tmp = db.define_table('person',\ 10564 Field('name'),\ 10565 Field('birth','date'),\ 10566 migrate='test_person.table') 10567 >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22') 10568 >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21') 10569 10570 commented len(db().select(db.person.ALL)) 10571 commented 2 10572 10573 >>> me = db(db.person.id==person_id).select()[0] # test select 10574 >>> me.name 10575 'Massimo' 10576 >>> db.person[2].name 10577 'Massimo' 10578 >>> db.person(2).name 10579 'Massimo' 10580 >>> db.person(name='Massimo').name 10581 'Massimo' 10582 >>> db.person(db.person.name=='Massimo').name 10583 'Massimo' 10584 >>> row = db.person[2] 10585 >>> row.name == row['name'] == row['person.name'] == row('person.name') 10586 True 10587 >>> db(db.person.name=='Massimo').update(name='massimo') # test update 10588 1 10589 >>> db(db.person.name=='Marco').select().first().delete_record() # test delete 10590 1 10591 10592 Update a single record 10593 10594 >>> me.update_record(name=\"Max\") 10595 <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}> 10596 >>> me.name 10597 'Max' 10598 10599 Examples of complex search conditions 10600 10601 >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select()) 10602 1 10603 >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select()) 10604 1 10605 >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select()) 10606 1 10607 >>> me = db(db.person.id==person_id).select(db.person.name)[0] 10608 >>> me.name 10609 'Max' 10610 10611 Examples of search conditions using extract from date/datetime/time 10612 10613 >>> len(db(db.person.birth.month()==12).select()) 10614 1 10615 >>> len(db(db.person.birth.year()>1900).select()) 10616 1 10617 10618 Example of usage of NULL 10619 10620 >>> len(db(db.person.birth==None).select()) ### test NULL 10621 0 10622 >>> len(db(db.person.birth!=None).select()) ### test NULL 10623 1 10624 10625 Examples of search conditions using lower, upper, and like 10626 10627 >>> len(db(db.person.name.upper()=='MAX').select()) 10628 1 10629 >>> len(db(db.person.name.like('%ax')).select()) 10630 1 10631 >>> len(db(db.person.name.upper().like('%AX')).select()) 10632 1 10633 >>> len(db(~db.person.name.upper().like('%AX')).select()) 10634 0 10635 10636 orderby, groupby and limitby 10637 10638 >>> people = db().select(db.person.name, orderby=db.person.name) 10639 >>> order = db.person.name|~db.person.birth 10640 >>> people = db().select(db.person.name, orderby=order) 10641 10642 >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name) 10643 10644 >>> people = db().select(db.person.name, orderby=order, limitby=(0,100)) 10645 10646 Example of one 2 many relation 10647 10648 >>> tmp = db.define_table('dog',\ 10649 Field('name'),\ 10650 Field('birth','date'),\ 10651 Field('owner',db.person),\ 10652 migrate='test_dog.table') 10653 >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id) 10654 1 10655 10656 A simple JOIN 10657 10658 >>> len(db(db.dog.owner==db.person.id).select()) 10659 1 10660 10661 >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id))) 10662 1 10663 10664 Drop tables 10665 10666 >>> db.dog.drop() 10667 >>> db.person.drop() 10668 10669 Example of many 2 many relation and Set 10670 10671 >>> tmp = db.define_table('author', Field('name'),\ 10672 migrate='test_author.table') 10673 >>> tmp = db.define_table('paper', Field('title'),\ 10674 migrate='test_paper.table') 10675 >>> tmp = db.define_table('authorship',\ 10676 Field('author_id', db.author),\ 10677 Field('paper_id', db.paper),\ 10678 migrate='test_authorship.table') 10679 >>> aid = db.author.insert(name='Massimo') 10680 >>> pid = db.paper.insert(title='QCD') 10681 >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid) 10682 10683 Define a Set 10684 10685 >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id)) 10686 >>> rows = authored_papers.select(db.author.name, db.paper.title) 10687 >>> for row in rows: print row.author.name, row.paper.title 10688 Massimo QCD 10689 10690 Example of search condition using belongs 10691 10692 >>> set = (1, 2, 3) 10693 >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL) 10694 >>> print rows[0].title 10695 QCD 10696 10697 Example of search condition using nested select 10698 10699 >>> nested_select = db()._select(db.authorship.paper_id) 10700 >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL) 10701 >>> print rows[0].title 10702 QCD 10703 10704 Example of expressions 10705 10706 >>> mynumber = db.define_table('mynumber', Field('x', 'integer')) 10707 >>> db(mynumber).delete() 10708 0 10709 >>> for i in range(10): tmp = mynumber.insert(x=i) 10710 >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum()) 10711 45 10712 10713 >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2) 10714 5 10715 10716 Output in csv 10717 10718 >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip() 10719 author.name,paper.title\r 10720 Massimo,QCD 10721 10722 Delete all leftover tables 10723 10724 >>> DAL.distributed_transaction_commit(db) 10725 10726 >>> db.mynumber.drop() 10727 >>> db.authorship.drop() 10728 >>> db.author.drop() 10729 >>> db.paper.drop() 10730 """
10731 ################################################################################ 10732 # deprecated since the new DAL; here only for backward compatibility 10733 ################################################################################ 10734 10735 SQLField = Field 10736 SQLTable = Table 10737 SQLXorable = Expression 10738 SQLQuery = Query 10739 SQLSet = Set 10740 SQLRows = Rows 10741 SQLStorage = Row 10742 SQLDB = DAL 10743 GQLDB = DAL 10744 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10745 DAL.Table = Table # was necessary in gluon/globals.py session.connect
10746 10747 ################################################################################ 10748 # Geodal utils 10749 ################################################################################ 10750 10751 -def geoPoint(x,y):
10752 return "POINT (%f %f)" % (x,y)
10753
10754 -def geoLine(*line):
10755 return "LINESTRING (%s)" % ','.join("%f %f" % item for item in line)
10756
10757 -def geoPolygon(*line):
10758 return "POLYGON ((%s))" % ','.join("%f %f" % item for item in line)
10759 10760 ################################################################################ 10761 # run tests 10762 ################################################################################ 10763 10764 if __name__ == '__main__': 10765 import doctest 10766 doctest.testmod() 10767